cdactvm committed on
Commit
8802df4
1 Parent(s): 4b4bebd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +75 -32
app.py CHANGED
@@ -1,41 +1,84 @@
 
 
1
  import gradio as gr
 
 
2
 
3
 
4
- def ask(txt):
5
- print("Asked!")
6
- return "Fun", "Times"
7
 
 
 
 
 
8
 
9
- desc = "Description"
10
- article_text = "Article Text!"
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  callback = gr.CSVLogger()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
 
14
 
15
- with gr.Blocks() as demo:
16
- gr.Markdown(
17
- """
18
- # Maryland Kids Law!
19
- Start typing below to see the output.
20
- """)
21
- txt = gr.Textbox(label="Ask your question:", lines=5,
22
- placeholder="Is it ok to punch a kid?")
23
- txt_2 = gr.Textbox(placeholder="Your Question will appear here.",
24
- label="Output", lines=5)
25
- txt_3 = gr.Textbox(
26
- placeholder="The references cited to answer your question will appear here.", label="References", lines=5)
27
- btn = gr.Button(value="Ask!")
28
- btn.click(ask, inputs=[txt], outputs=[txt_2, txt_3])
29
-
30
- gr.examples = [
31
- ["What a beautiful morning for a walk!"],
32
- ["It was the best of times, it was the worst of times."]
33
- ]
34
- gr.HTML("hello world!")
35
-
36
- callback.setup([txt, txt_2, txt_3], "flagged_data_points")
37
- # We can choose which components to flag -- in this case, we'll flag all of them
38
- btn.click(lambda *args: callback.flag(args),
39
- [txt, txt_2, txt_3], None, preprocess=False)
40
-
41
- demo.launch()
 
1
+ import os
2
+ import warnings
3
  import gradio as gr
4
+ from transformers import pipeline
5
+ import re
6
 
7
 
 
 
 
8
 
9
+ # Initialize the speech recognition pipeline and transliterator
10
+
11
+ #p1 = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-2.0-odia_v1")
12
+ #p2 = pipeline(task="automatic-speech-recognition", model="cdactvm/w2v-bert-2.0-hindi_v1")
13
 
14
# Token used to push flagged samples to the Hugging Face Hub.
# NOTE(review): the env var is spelled 'HW_TOKEN' — looks like a typo for
# 'HF_TOKEN'; confirm against the Space's configured secrets before changing.
HF_TOKEN = os.getenv('HW_TOKEN')
# Flagging callback that appends flagged (text, audio) samples to the
# "save_audio" dataset repository.
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "save_audio")


# Index of the prompt line currently shown; mutated by
# readPromt / readNext / readPrevious via `global`.
cur_line=0
19
+
20
def readFile():
    """Load prompt.txt and return (line_count, lines).

    Returns:
        tuple[int, list[str]]: number of lines and the raw lines
        (trailing newlines preserved, as ``readlines`` produces them).
    """
    # Bug fix: the original opened the file without ever closing it and
    # carried a dead ``line_num = 0`` assignment; ``with`` guarantees the
    # handle is released even if reading raises.
    with open('prompt.txt') as f:
        lines = f.readlines()
    return len(lines), lines
26
+
27
# Prompt count and cached file contents, loaded once at startup.
totlines,file_content=readFile()

# CSV logger retained from an earlier version; its setup/flag calls below
# are commented out, so flagging currently goes through hf_writer instead.
callback = gr.CSVLogger()
30
+
31
def readPromt():
    """Advance the global cursor and return the prompt line it lands on.

    NOTE(review): the cursor is incremented *before* reading, so the very
    first call returns file_content[1] and index 0 is never displayed —
    presumably intentional (header line?); confirm against prompt.txt.
    """
    global cur_line, file_content
    cur_line = cur_line + 1
    print(cur_line)  # trace the cursor position, as the original did
    return file_content[cur_line]
37
+
38
def readNext():
    """Step the global cursor forward, clamped at the last prompt line.

    Returns:
        list: [prompt_text, None] — the None is wired to the audio
        component so stepping clears any previous recording.
    """
    global totlines, cur_line, file_content
    print(totlines)  # trace, kept from the original
    # Advance only while not already on the final line.
    if cur_line < totlines - 1:
        cur_line += 1
    print(cur_line)
    return [file_content[cur_line], None]
48
+
49
def readPrevious():
    """Step the global cursor backward, clamped at the first prompt line.

    Returns:
        list: [prompt_text, None] — the None is wired to the audio
        component so stepping clears any previous recording.
    """
    global cur_line, file_content
    # Bug fix: the original guard was ``cur_line >= 0``, so pressing
    # Previous on the first prompt drove cur_line to -1 and Python's
    # negative indexing wrapped around to the LAST line. Guard with > 0
    # so the cursor stops at index 0 instead.
    if cur_line > 0:
        cur_line -= 1
    print(cur_line)  # trace the cursor position, as the original did
    return [file_content[cur_line], None]
57
 
58
demo = gr.Blocks()

with demo:
    # Current prompt text, seeded with the first prompt from prompt.txt.
    text = gr.Textbox(readPromt())
    # Recorder/uploader for the audio of the sentence being read.
    upfile = gr.Audio(
        sources=["microphone", "upload"], type="filepath", label="Record"
    )

    with gr.Row():
        b1 = gr.Button("Save")
        b2 = gr.Button("Next")
        b3 = gr.Button("Previous")

    # Next/Previous swap the prompt text and clear the audio widget
    # (the handlers return [new_text, None]).
    b2.click(readNext, inputs=None, outputs=[text, upfile])
    b3.click(readPrevious, inputs=None, outputs=[text, upfile])

    # Bug fix: the original handler was ``lambda *args: flagging_callback``,
    # which merely *returned* the saver object without calling it, and the
    # callback was never set up — so Save persisted nothing. A flagging
    # callback must be .setup(...) once with the tracked components, then
    # .flag(...)-ed with their values on each click.
    hf_writer.setup([text, upfile], "flagged_data_points")
    b1.click(lambda *args: hf_writer.flag(args),
             [text, upfile], None, preprocess=False)

demo.launch()