cdactvm committed on
Commit 353a82a
1 Parent(s): 3ff7897

Update app.py

Files changed (1)
  1. app.py +67 -25
app.py CHANGED
@@ -2,37 +2,79 @@ import os
  import warnings
  import gradio as gr
  import re
- import numpy as np
 
  HF_TOKEN = os.getenv('HW_TOKEN')
  hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "save_audio")
 
- def sepia(input_img, strength):
-     sepia_filter = strength * np.array(
-         [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]
-     ) + (1-strength) * np.identity(3)
-     sepia_img = input_img.dot(sepia_filter.T)
-     sepia_img /= sepia_img.max()
-     return sepia_img
 
- callback = hf_writer
 
- with gr.Blocks() as demo:
-     with gr.Row():
-         with gr.Column():
-             img_input = gr.Image()
-             strength = gr.Slider(0, 1, 0.5)
-         img_output = gr.Image()
-     with gr.Row():
-         btn = gr.Button("Flag")
-
-     # This needs to be called at some point prior to the first call to callback.flag()
-     callback.setup([img_input, strength, img_output], "flagged_data_points")
 
-     img_input.change(sepia, [img_input, strength], img_output)
-     strength.change(sepia, [img_input, strength], img_output)
 
-     # We can choose which components to flag -- in this case, we'll flag all of them
-     btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)
 
- demo.launch()
  import warnings
  import gradio as gr
  import re
+
 
  HF_TOKEN = os.getenv('HW_TOKEN')
  hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "save_audio")
 
 
+ cur_line=-1
 
+ def readFile():
+     f=open('prompt.txt')
+     line_num=0
+     lines=f.readlines()
+     line_num = len(lines)
+     return line_num,lines
+
+ totlines,file_content=readFile()
 
+ callback = hf_writer
+
+ def readPromt():
+     global cur_line
+     cur_line+=1
+     global file_content
+     print (cur_line)
+     return file_content[cur_line]
+
+ def readNext():
 
+     global totlines
+     print(totlines)
+     global cur_line
+     if cur_line<totlines-1:
+         cur_line+=1
+     global file_content
+     print (cur_line)
+     return [file_content[cur_line],None]
+
+ def readPrevious():
+     global cur_line
+     if cur_line>=0:
+         cur_line-=1
+     #cur_line=current_line
+     global file_content
+     print (cur_line)
+     return [file_content[cur_line],None]
 
+ demo = gr.Blocks()
+
+ with demo:
+     #dr=gr.Dropdown(["Hindi","Odiya"],value="Odiya",label="Select Language")
+     #audio_file = gr.Audio(sources=["microphone","upload"],type="filepath")
+     text = gr.Textbox(readPromt())
+     #allow_flagging="manual",
+     #flagging_callback=hf_writer
+     upfile = gr.Audio(
+         sources=["microphone","upload"], type="filepath", label="Record"
+     )
+     #upfile = gr.inputs.Audio(source="upload", type="filepath", label="Upload")
+
+     with gr.Row():
+         b1 = gr.Button("Save")
+         b2 = gr.Button("Next")
+         b3 = gr.Button("Previous")
+         #b4=gr.Button("Clear")
+     b2.click(readNext,inputs=None,outputs=[text,upfile])
+     b3.click(readPrevious,inputs=None,outputs=[text,upfile])
+     #b4.click(lambda: None, outputs=upfile)
+     # b1.click(sel_lng, inputs=[dr,mic,upfile], outputs=text)
+     #b2.click(text_to_sentiment, inputs=text, outputs=label)
+     #callback.setup([text, upfile], "flagged_data_points")
+     #callback.setup([text, upfile], hf_writer)
+     b1.click(lambda *args: callback.flag(args), [text, upfile], None, preprocess=False)
+     #flagging_callback=hf_writer
+     #b1.click(lambda *args: hf_writer, [text, upfile], None, preprocess=False)
+     #b1.click(lambda *args: hf_writer, [text, upfile])
+ demo.launch()
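
Editor's note on the updated app.py: the removed version's own comment states that callback.setup() "needs to be called at some point prior to the first call to callback.flag()", but in the new version both callback.setup(...) lines are commented out while the Save button still calls callback.flag(args), which relies on state initialized by setup(). The new readFile() also never closes prompt.txt, and readPrevious() can step the index to -1, which wraps around to the last prompt. The sketch below is not part of the commit; it is a minimal variant of the same prompt-recording flow with those points addressed, assuming a Gradio 4.x environment where gr.HuggingFaceDatasetSaver and the sources= argument are both available (as the diff itself implies). The helper names read_prompts, next_prompt, prev_prompt, and show are illustrative, and reading the HF_TOKEN secret is an assumption; the committed code reads an environment variable named 'HW_TOKEN'.

import os
import gradio as gr

# Assumption: the Space secret is named HF_TOKEN; the committed code reads 'HW_TOKEN'.
HF_TOKEN = os.getenv("HF_TOKEN")
callback = gr.HuggingFaceDatasetSaver(HF_TOKEN, "save_audio")  # dataset name from the diff

def read_prompts(path="prompt.txt"):
    # Read every prompt once and close the file (readFile() in the diff leaves it open).
    with open(path, encoding="utf-8") as f:
        return [line.strip() for line in f if line.strip()]

prompts = read_prompts()
cur = 0

def show(index):
    # Return the prompt text plus None so the audio component is cleared.
    return prompts[index], None

def next_prompt():
    global cur
    cur = min(cur + 1, len(prompts) - 1)  # clamp at the last prompt
    return show(cur)

def prev_prompt():
    global cur
    cur = max(cur - 1, 0)  # clamp at 0 so the index never wraps to -1
    return show(cur)

with gr.Blocks() as demo:
    text = gr.Textbox(prompts[0], label="Prompt")
    upfile = gr.Audio(sources=["microphone", "upload"], type="filepath", label="Record")
    with gr.Row():
        b1 = gr.Button("Save")
        b2 = gr.Button("Next")
        b3 = gr.Button("Previous")

    # Per the comment removed in this commit, setup() must run before the first flag() call.
    callback.setup([text, upfile], "flagged_data_points")

    b2.click(next_prompt, inputs=None, outputs=[text, upfile])
    b3.click(prev_prompt, inputs=None, outputs=[text, upfile])
    # Same flagging pattern as the diff: pass raw component values straight to flag().
    b1.click(lambda *args: callback.flag(args), [text, upfile], None, preprocess=False)

demo.launch()

Restoring the setup() call keeps the Save button writing rows to the save_audio dataset rather than failing on the first flag, and clamping the index keeps Next/Previous inside the prompt list.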