Upload 2 files
Maybe like this?
VSCode tells me about syntax errors in the code.
- app.py +162 -168
- dowoad_param.py +7 -5
app.py
CHANGED
@@ -1,168 +1,162 @@
-import gradio as gr
-from rvc_infer import infer_audio
-import os
-import re
-import random
-from scipy.io.wavfile import write
-from scipy.io.wavfile import read
-import numpy as np
-import yt_dlp
-import subprocess
[old lines 11–21 were rendered blank in the diff view]
-def update_models_list():
-    models_l = get_current_models(rvc_models_dir)
-    return gr.update(choices=models_l)
[old lines 25–162 were rendered blank or truncated in the diff view; only the fragments "", "gr." and the closing lines below survive]
-    show_api=True,
-    show_progress="full",
-)
-
-demo.queue()
-demo.launch(debug=True,share=True,show_api=False)
+import gradio as gr
+from rvc_infer import infer_audio, get_current_models
+import os
+import re
+import random
+from scipy.io.wavfile import write
+from scipy.io.wavfile import read
+import numpy as np
+import yt_dlp
+import subprocess
+import zipfile
+import shutil
+import urllib.request
+
+print("downloading RVC models")
+os.system("python dowoad_param.py")
+
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+rvc_models_dir = os.path.join(BASE_DIR, 'models')
+
+def update_models_list():
+    models_l = get_current_models(rvc_models_dir)
+    return gr.update(choices=models_l)
+
+def extract_zip(extraction_folder, zip_name):
+    os.makedirs(extraction_folder)
+    with zipfile.ZipFile(zip_name, 'r') as zip_ref:
+        zip_ref.extractall(extraction_folder)
+    os.remove(zip_name)
+
+    index_filepath, model_filepath = None, None
+    for root, dirs, files in os.walk(extraction_folder):
+        for name in files:
+            if name.endswith('.index') and os.stat(os.path.join(root, name)).st_size > 1024 * 100:
+                index_filepath = os.path.join(root, name)
+
+            if name.endswith('.pth') and os.stat(os.path.join(root, name)).st_size > 1024 * 1024 * 40:
+                model_filepath = os.path.join(root, name)
+
+    if not model_filepath:
+        raise gr.Error(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')
+
+    # move model and index file to extraction folder
+    os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
+    if index_filepath:
+        os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))
+
+    # remove any unnecessary nested folders
+    for filepath in os.listdir(extraction_folder):
+        if os.path.isdir(os.path.join(extraction_folder, filepath)):
+            shutil.rmtree(os.path.join(extraction_folder, filepath))
+
+def download_online_model(url, dir_name, progress=gr.Progress()):
+    try:
+        progress(0, desc=f'[~] Downloading voice model with name {dir_name}...')
+        zip_name = url.split('/')[-1]
+        extraction_folder = os.path.join(rvc_models_dir, dir_name)
+        if os.path.exists(extraction_folder):
+            raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
+
+        if 'pixeldrain.com' in url:
+            url = f'https://pixeldrain.com/api/file/{zip_name}'
+
+        urllib.request.urlretrieve(url, zip_name)
+
+        progress(0.5, desc='[~] Extracting zip...')
+        extract_zip(extraction_folder, zip_name)
+        return f'[+] {dir_name} Model successfully downloaded!'
+
+    except Exception as e:
+        raise gr.Error(str(e))
+
+def download_audio(url):
+    ydl_opts = {
+        'format': 'bestaudio/best',
+        'outtmpl': 'ytdl/%(title)s.%(ext)s',
+        'postprocessors': [{
+            'key': 'FFmpegExtractAudio',
+            'preferredcodec': 'wav',
+            'preferredquality': '192',
+        }],
+    }
+
+    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+        info_dict = ydl.extract_info(url, download=True)
+        file_path = ydl.prepare_filename(info_dict).rsplit('.', 1)[0] + '.wav'
+        sample_rate, audio_data = read(file_path)
+        audio_array = np.asarray(audio_data, dtype=np.int16)
+
+    return sample_rate, audio_array
+
+
+CSS = """
+"""
+
+with gr.Blocks(theme="Hev832/Applio", fill_width=True, css=CSS) as demo:
+    gr.Markdown("# RVC INFER DEMOS")
+    gr.Markdown("# Recommended: use the Colab version, which has more features!<br> [![Open In Colab](https://img.shields.io/badge/google_colab-F9AB00?style=flat-square&logo=googlecolab&logoColor=white)](https://colab.research.google.com/drive/1bM1LB2__WNFxX8pyZmUPQZYq7dg58YWG?usp=sharing)")
+    with gr.Tab("Inference"):
+        gr.Markdown("in progress")
+        model_name = gr.Dropdown(label='Voice Models', info='Models folder "rvc_infer --> models". After new models are added into this folder, click the refresh button')
+        ref_btn = gr.Button('Refresh Models', variant='primary')
+        input_audio = gr.Audio(label="Input Audio", type="filepath")
+        with gr.Accordion("Settings", open=False):
+            f0_change = gr.Slider(label="f0 change", minimum=-12, maximum=12, step=1, value=0)
+            f0_method = gr.Dropdown(label="f0 method", choices=["rmvpe+", "rmvpe", "fcpe", "hybrid[rmvpe+fcpe]"], value="rmvpe+")
+            min_pitch = gr.Textbox(label="min pitch", lines=1, value="-12")
+            max_pitch = gr.Textbox(label="max pitch", lines=1, value="12")
+            crepe_hop_length = gr.Slider(label="crepe_hop_length", minimum=0, maximum=256, step=1, value=128)
+            index_rate = gr.Slider(label="index_rate", minimum=0, maximum=1.0, step=0.01, value=0.75)
+            filter_radius = gr.Slider(label="filter_radius", minimum=0, maximum=10.0, step=0.01, value=3)
+            rms_mix_rate = gr.Slider(label="rms_mix_rate", minimum=0, maximum=1.0, step=0.01, value=0.25)
+            protect = gr.Slider(label="protect", minimum=0, maximum=1.0, step=0.01, value=0.33)
+        with gr.Accordion("Advanced Settings", open=False):
+            split_infer = gr.Checkbox(label="split_infer", value=False)
+            min_silence = gr.Slider(label="min_silence", minimum=0, maximum=1000, step=1, value=500)
+            silence_threshold = gr.Slider(label="silence_threshold", minimum=-1000, maximum=1000, step=1, value=-50)
+            seek_step = gr.Slider(label="seek_step", minimum=0, maximum=100, step=1, value=0)
+            keep_silence = gr.Slider(label="keep_silence", minimum=-1000, maximum=1000, step=1, value=100)
+            do_formant = gr.Checkbox(label="do_formant", value=False)
+            quefrency = gr.Slider(label="quefrency", minimum=0, maximum=100, step=1, value=0)
+            timbre = gr.Slider(label="timbre", minimum=0, maximum=100, step=1, value=1)
+            f0_autotune = gr.Checkbox(label="f0_autotune", value=False)
+            audio_format = gr.Dropdown(label="audio_format", choices=["wav"], value="wav", visible=False)
+            resample_sr = gr.Slider(label="resample_sr", minimum=0, maximum=100, step=1, value=0)
+            hubert_model_path = gr.Textbox(label="hubert_model_path", lines=1, value="hubert_base.pt", visible=False)
+            rmvpe_model_path = gr.Textbox(label="rmvpe_model_path", lines=1, value="rmvpe.pt", visible=False)
+            fcpe_model_path = gr.Textbox(label="fcpe_model_path", lines=1, value="fcpe.pt", visible=False)
+        submit_inference = gr.Button('Inference', variant='primary')
+        result_audio = gr.Audio(label="Output Audio")
+
+    with gr.Tab("Download Model"):
+        gr.Markdown("## Download a model for inference")
+        url_input = gr.Textbox(label="Model URL", placeholder="Enter the URL of the model")
+        dir_name_input = gr.Textbox(label="Directory Name", placeholder="Enter the directory name")
+        output = gr.Textbox(label="Output Models")
+        download_button = gr.Button("Download Model")
+        download_button.click(download_online_model, inputs=[url_input, dir_name_input], outputs=output)
+
+    with gr.Tab("Credits"):
+        gr.Markdown(
+            """
+            This project was made by [Blane187](https://huggingface.co/Blane187), with improvements by [John6666](https://huggingface.co/John6666).
+            """)
+
+    ref_btn.click(update_models_list, None, outputs=model_name)
+    gr.on(
+        triggers=[submit_inference.click],
+        fn=infer_audio,
+        inputs=[model_name, input_audio, f0_change, f0_method, min_pitch, max_pitch, crepe_hop_length, index_rate,
+                filter_radius, rms_mix_rate, protect, split_infer, min_silence, silence_threshold, seek_step,
+                keep_silence, do_formant, quefrency, timbre, f0_autotune, audio_format, resample_sr,
+                hubert_model_path, rmvpe_model_path, fcpe_model_path],
+        outputs=[result_audio],
+        queue=True,
+        show_api=True,
+        show_progress="full",
+    )
+
+demo.queue()
+demo.launch(debug=True, share=True, show_api=False)
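Note: app.py already imports subprocess, so the startup call that runs the download script could use subprocess.run instead of os.system. A minimal sketch of that alternative (same effect, but it raises a clear error if the script exits non-zero):

import subprocess
import sys

# Run the parameter download script with the current interpreter; check=True
# raises CalledProcessError if the script fails instead of silently continuing.
subprocess.run([sys.executable, "dowoad_param.py"], check=True)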
dowoad_param.py
CHANGED
@@ -1,6 +1,8 @@
-
-
-
-os.system("wget
-
+import os
+
+def dowoad_param():
+    os.system("wget https://huggingface.co/datasets/ylzz1997/rmvpe_pretrain_model/resolve/main/fcpe.pt -O fcpe.pt")
+    os.system("wget https://huggingface.co/Kit-Lemonfoot/RVC_DidntAsk/resolve/main/hubert_base.pt -O hubert_base.pt")
+    os.system("wget https://huggingface.co/Kit-Lemonfoot/RVC_DidntAsk/resolve/main/rmvpe.pt -O rmvpe.pt")
+
 dowoad_param()
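If wget is not guaranteed to be available in the Space image, the same three files could be fetched with huggingface_hub instead. A minimal sketch under that assumption, with repo IDs and filenames taken from the URLs above:

from huggingface_hub import hf_hub_download

# fcpe.pt is hosted in a dataset repo, hence repo_type="dataset".
hf_hub_download(repo_id="ylzz1997/rmvpe_pretrain_model", repo_type="dataset",
                filename="fcpe.pt", local_dir=".")
hf_hub_download(repo_id="Kit-Lemonfoot/RVC_DidntAsk", filename="hubert_base.pt", local_dir=".")
hf_hub_download(repo_id="Kit-Lemonfoot/RVC_DidntAsk", filename="rmvpe.pt", local_dir=".")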