Files changed (1) hide show
  1. app.py +100 -85
app.py CHANGED
@@ -1,85 +1,100 @@
1
- import gradio as gr
2
- from rvc_infer import download_online_model
3
- import os
4
- import re
5
- import random
6
- from scipy.io.wavfile import write
7
- from scipy.io.wavfile import read
8
- import numpy as np
9
- import yt_dlp
10
- import subprocess
11
-
12
-
13
-
14
-
15
def download_model(url, dir_name):
    """Download an online RVC model and return the helper's result.

    Args:
        url: URL of the model archive to download.
        dir_name: Target directory name for the downloaded model files.

    Returns:
        Whatever ``download_online_model`` returns for the downloaded
        model (not the input ``dir_name``).
    """
    # Bug fix: the original discarded the helper's return value and
    # echoed the input directory name back to the caller instead.
    output_models = download_online_model(url, dir_name)
    return output_models
18
-
19
-
20
-
21
-
22
-
23
-
24
-
25
-
26
-
27
def download_audio(url):
    """Fetch the audio track of *url* with yt-dlp and return it as PCM.

    Returns:
        Tuple ``(sample_rate, audio_array)`` — the rate in Hz and the
        decoded samples as an ``int16`` numpy array.
    """
    # Ask yt-dlp for the best audio stream; ffmpeg converts it to WAV.
    options = {
        'format': 'bestaudio/best',
        'outtmpl': 'ytdl/%(title)s.%(ext)s',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'wav',
            'preferredquality': '192',
        }],
    }

    with yt_dlp.YoutubeDL(options) as downloader:
        info = downloader.extract_info(url, download=True)
        # The post-processor swapped the extension for .wav.
        wav_path = downloader.prepare_filename(info).rsplit('.', 1)[0] + '.wav'
        rate, samples = read(wav_path)
        pcm = np.asarray(samples, dtype=np.int16)

    return rate, pcm
45
-
46
-
47
-
48
-
49
-
50
-
51
-
52
# Custom CSS for the Blocks app (currently empty; kept as an extension point).
CSS = """
"""

# Top-level Gradio UI: three tabs — inference placeholder, model download,
# and credits.
with gr.Blocks(theme="Hev832/Applio", fill_width=True, css=CSS) as demo:

    with gr.Tabs():
        with gr.Tab("Inference"):  # typo fixed: was "inferenece"
            gr.Markdown("in progress")
        with gr.Tab("Download model"):
            gr.Markdown("## Download Model for inference")  # typo fixed: was "infernece"
            url_input = gr.Textbox(label="Model URL", placeholder="Enter the URL of the model")
            dir_name_input = gr.Textbox(label="Directory Name", placeholder="Enter the directory name")

            download_button = gr.Button("Download Model")
            # The download result is written back into the URL textbox.
            download_button.click(download_model, inputs=[url_input, dir_name_input], outputs=url_input)

        with gr.Tab(" Credits"):
            gr.Markdown(
                """
                this project made by [Blane187](https://huggingface.co/Blane187) with Improvements by [John6666](https://huggingface.co/John6666)
                """)  # link fixed: was the broken domain "huggingfce.co"

demo.launch(debug=True, show_api=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from rvc_infer import download_online_model, infer_audio
3
+ import os
4
+ import re
5
+ import random
6
+ from scipy.io.wavfile import write
7
+ from scipy.io.wavfile import read
8
+ import numpy as np
9
+ import yt_dlp
10
+ import subprocess
11
+
12
def download_model(url, dir_name):
    """Download an online RVC model and return the helper's result."""
    return download_online_model(url, dir_name)
15
+
16
+
17
def download_audio(url):
    """Download the audio track of *url* as WAV and load it into memory.

    Args:
        url: Video/audio page URL understood by yt-dlp.

    Returns:
        Tuple ``(sample_rate, audio_array)`` where ``audio_array`` is an
        ``int16`` numpy array of the decoded samples.
    """
    ydl_opts = {
        'format': 'bestaudio/best',
        'outtmpl': 'ytdl/%(title)s.%(ext)s',
        # Let ffmpeg convert whatever stream yt-dlp picked into WAV.
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'wav',
            'preferredquality': '192',
        }],
    }

    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        info_dict = ydl.extract_info(url, download=True)
        # The post-processor rewrote the extension, so swap it for .wav.
        # os.path.splitext is the idiomatic form of the manual rsplit('.', 1).
        file_path = os.path.splitext(ydl.prepare_filename(info_dict))[0] + '.wav'
        sample_rate, audio_data = read(file_path)
        # NOTE(review): casting with dtype=int16 assumes the WAV is already
        # 16-bit PCM; float WAV data would be truncated — confirm upstream.
        audio_array = np.asarray(audio_data, dtype=np.int16)

    return sample_rate, audio_array
35
+
36
+
37
# Custom CSS for the Blocks app (currently empty; kept as an extension point).
CSS = """
"""

# Top-level Gradio UI: an inference tab wired to infer_audio, a model
# download tab, and a credits tab.
with gr.Blocks(theme="Hev832/Applio", fill_width=True, css=CSS) as demo:
    with gr.Tab("Inference"):  # typo fixed: was "Inferenece"
        gr.Markdown("in progress")
        model_name = gr.Textbox(label="Model Name #", lines=1, value="")
        input_audio = gr.Audio(label="Input Audio #", type="filepath")
        f0_change = gr.Slider(label="f0 change #", minimum=0, maximum=10, step=1, value=0)
        f0_method = gr.Dropdown(label="f0 method #", choices=["rmvpe+"], value="rmvpe+")
        min_pitch = gr.Textbox(label="min pitch #", lines=1, value="50")
        max_pitch = gr.Textbox(label="max pitch #", lines=1, value="1100")
        crepe_hop_length = gr.Slider(label="crepe_hop_length #", minimum=0, maximum=256, step=1, value=128)
        index_rate = gr.Slider(label="index_rate #", minimum=0, maximum=1.0, step=0.01, value=0.75)
        filter_radius = gr.Slider(label="filter_radius #", minimum=0, maximum=10.0, step=0.01, value=3)
        rms_mix_rate = gr.Slider(label="rms_mix_rate #", minimum=0, maximum=1.0, step=0.01, value=0.25)
        protect = gr.Slider(label="protect #", minimum=0, maximum=1.0, step=0.01, value=0.33)
        split_infer = gr.Checkbox(label="split_infer #", value=False)
        min_silence = gr.Slider(label="min_silence #", minimum=0, maximum=1000, step=1, value=500)
        silence_threshold = gr.Slider(label="silence_threshold #", minimum=-1000, maximum=1000, step=1, value=-50)
        seek_step = gr.Slider(label="seek_step #", minimum=0, maximum=100, step=1, value=0)
        keep_silence = gr.Slider(label="keep_silence #", minimum=-1000, maximum=1000, step=1, value=100)
        do_formant = gr.Checkbox(label="do_formant #", value=False)
        quefrency = gr.Slider(label="quefrency #", minimum=0, maximum=100, step=1, value=0)
        timbre = gr.Slider(label="timbre #", minimum=0, maximum=100, step=1, value=1)
        f0_autotune = gr.Checkbox(label="f0_autotune #", value=False)
        audio_format = gr.Dropdown(label="audio_format #", choices=["wav"], value="wav")
        resample_sr = gr.Slider(label="resample_sr #", minimum=0, maximum=100, step=1, value=0)
        hubert_model_path = gr.Textbox(label="hubert_model_path #", lines=1, value="hubert_base.pt")  # label typo fixed: was "hubert_model_pathe"
        rmvpe_model_path = gr.Textbox(label="rmvpe_model_path #", lines=1, value="rmvpe.pt")
        fcpe_model_path = gr.Textbox(label="fcpe_model_path #", lines=1, value="fcpe.pt")
        submit_inference = gr.Button('Inference #', variant='primary')
        # Bug fix: gr.Audio's first positional argument is `value`, not
        # `label` — the original created an Audio whose value was the
        # string "Output Audio #". Pass it as the label keyword instead.
        result_audio = gr.Audio(label="Output Audio #", type="filepath")

    with gr.Tab("Download Model"):
        gr.Markdown("## Download Model for inference")  # typo fixed: was "infernece"
        url_input = gr.Textbox(label="Model URL", placeholder="Enter the URL of the model")
        dir_name_input = gr.Textbox(label="Directory Name", placeholder="Enter the directory name")
        output = gr.Textbox(label="Output Models")
        download_button = gr.Button("Download Model")
        download_button.click(download_model, inputs=[url_input, dir_name_input], outputs=output)

    with gr.Tab(" Credits"):
        gr.Markdown(
            """
            this project made by [Blane187](https://huggingface.co/Blane187) with Improvements by [John6666](https://huggingface.co/John6666)
            """)  # link fixed: was the broken domain "huggingfce.co"

    # Wire the inference button to infer_audio with the full parameter set.
    gr.on(
        triggers=[submit_inference.click],
        fn=infer_audio,
        inputs=[model_name, input_audio, f0_change, f0_method, min_pitch, max_pitch, crepe_hop_length, index_rate,
                filter_radius, rms_mix_rate, protect, split_infer, min_silence, silence_threshold, seek_step,
                keep_silence, do_formant, quefrency, timbre, f0_autotune, audio_format, resample_sr,
                hubert_model_path, rmvpe_model_path, fcpe_model_path],
        outputs=[result_audio],
        queue=True,
        show_api=True,
        show_progress="full",
    )

demo.queue()
demo.launch(debug=True, show_api=False)