Mahiruoshi committed
Commit 3baf9e8
1 Parent(s): f4e2535
Update app.py
app.py CHANGED
@@ -221,9 +221,10 @@ def create_tts_fn(net_g,hps,speaker_id):
                 for i in range(repeat_time):
                     cmd = 'ffmpeg -y -i ' + audiopath + '.wav' + ' -ar 44100 '+ audiopath.replace('temp','temp'+str(i))
                     os.system(cmd)
-
+
         except:
             pass
+        file_path = "subtitles.srt"
         return (hps.data.sampling_rate, np.concatenate(audio_fin)),file_path,htm
     return tts_fn
 
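For context on the first hunk: the untouched loop shells out to ffmpeg once per copy, resampling audiopath + '.wav' to 44.1 kHz and writing each result to the same path with 'temp' replaced by 'temp0', 'temp1', and so on, while the added file_path = "subtitles.srt" line pins down the subtitle file that tts_fn now returns alongside the audio and the HTML. Below is a minimal sketch of that resampling step only, rewritten with subprocess.run and an argument list purely for illustration; it is not what the commit does, and audiopath / repeat_time are assumed to keep the meanings they have inside create_tts_fn.

import subprocess

def resample_copies(audiopath: str, repeat_time: int) -> None:
    # Write repeat_time copies of <audiopath>.wav resampled to 44.1 kHz,
    # mirroring the original output naming: 'temp' -> 'temp0', 'temp1', ...
    for i in range(repeat_time):
        out_path = audiopath.replace('temp', 'temp' + str(i))
        # -y overwrites existing files; -ar 44100 sets the output sample rate.
        subprocess.run(
            ['ffmpeg', '-y', '-i', audiopath + '.wav', '-ar', '44100', out_path],
            check=True,
        )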
@@ -267,6 +268,19 @@ if __name__ == '__main__':
             for (sid, name, title, example, tts_fn) in models[schools.index(i)]:
                 with gr.TabItem(name):
                     with gr.Column():
+                        with gr.Row():
+                            with gr.Row():
+                                gr.Markdown(
+                                    '<div align="center">'
+                                    f'<img style="width:auto;height:400px;" src="file/image/{name}.png">'
+                                    '</div>'
+                                )
+                            with gr.Accordion(label="聊天记录", open=False):
+                                with gr.Row():
+                                    output_UI = gr.outputs.HTML()
+                                    input1 = gr.TextArea(label="Text", value=example, lines=1)
+                                    btnVC = gr.Button("Submit")
+                                    output1 = gr.Audio(label="采样率22050")
                         with gr.Accordion(label="Setting", open=False):
                             input2 = gr.Dropdown(label="Language", choices=lan, value="自动", interactive=True)
                             input3 = gr.Checkbox(value=False, label="长句切割(小说合成)")
@@ -281,18 +295,7 @@ if __name__ == '__main__':
                             audio_input1 = gr.Checkbox(value=False, label="修改音频路径(live2d)")
                             audio_input2 = gr.TextArea(label="音频路径",lines=1,value = '示范:D:/app_develop/live2d_whole/2010002/sounds/temp.wav')
 
-
-                        with gr.Row():
-                            gr.Markdown(
-                                '<div align="center">'
-                                f'<img style="width:auto;height:400px;" src="file/image/{name}.png">'
-                                '</div>'
-                            )
-                        with gr.Row():
-                            output_UI = gr.outputs.HTML()
-                            input1 = gr.TextArea(label="Text", value=example, lines=1)
-                            btnVC = gr.Button("Submit")
-                            output1 = gr.Audio(label="采样率22050")
+
                         btnVC.click(tts_fn, inputs=[api_input1,api_input2,audio_input1,audio_input2,audio_input3,input1,input2,input3,input4,input5,input6], outputs=[output1,output2,output_UI])
 
     app.launch()
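Putting the two UI hunks together, the commit moves the character image and the chat widgets out of the old pair of bare Rows at the bottom of the tab and into a Row at the top of the Column, with the chat controls tucked into a collapsible "聊天记录" (chat history) accordion next to the image, ahead of the existing "Setting" accordion. The following is a condensed sketch of the resulting per-tab layout, with placeholder values for name, example and lan and the event wiring omitted; the nesting is inferred from the diff and may not match app.py exactly.

import gradio as gr

# Stand-ins for values that app.py derives from its model list; they are
# placeholders so this layout sketch runs on its own.
name = "ExampleCharacter"
example = "こんにちは。"
lan = ["自动", "日本語", "中文", "English"]

with gr.Blocks() as demo:
    with gr.TabItem(name):
        with gr.Column():
            with gr.Row():
                with gr.Row():
                    gr.Markdown(
                        '<div align="center">'
                        f'<img style="width:auto;height:400px;" src="file/image/{name}.png">'
                        '</div>'
                    )
                with gr.Accordion(label="聊天记录", open=False):
                    with gr.Row():
                        output_UI = gr.HTML()  # the commit uses the older gr.outputs.HTML() spelling
                        input1 = gr.TextArea(label="Text", value=example, lines=1)
                        btnVC = gr.Button("Submit")
                        output1 = gr.Audio(label="采样率22050")
            with gr.Accordion(label="Setting", open=False):
                input2 = gr.Dropdown(label="Language", choices=lan, value="自动", interactive=True)
                input3 = gr.Checkbox(value=False, label="长句切割(小说合成)")
                # The remaining Setting controls and the btnVC.click wiring
                # from app.py are omitted in this sketch.

if __name__ == "__main__":
    demo.launch()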