Ryouko65777 committed · Commit 3e4e1cb · Parent(s): eb1e6af

Update app.py
app.py CHANGED
@@ -3,7 +3,8 @@ import os
 from lib.infer import infer_audio
 from pydub import AudioSegment
 
-
+main_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
 
 
 f0_method = {
@@ -28,17 +29,9 @@ f0_method = {
 # Function for inference
 def inference(model_name, audio, f0_change, f0_method, min_pitch, max_pitch, crepe_hop_length,
               index_rate, filter_radius, rms_mix_rate, protect, split_infer, min_silence,
-              silence_threshold, seek_step, keep_silence,
+              silence_threshold, seek_step, keep_silence, quefrency, timbre,
               f0_autotune, output_format):
 
-    main_dir = ""
-    os.chdir(main_dir)
-
-    # Save the uploaded audio file
-    audio_path = "uploaded_audio.wav"
-    audio.save(audio_path)
-
-    os.system("chmod +x stftpitchshift")
 
     # Perform inference
     inferred_audio = infer_audio(
@@ -58,7 +51,6 @@ def inference(model_name, audio, f0_change, f0_method, min_pitch, max_pitch, cre
         silence_threshold,
         seek_step,
         keep_silence,
-        formant_shift,
         quefrency,
         timbre,
         f0_autotune,
@@ -96,7 +88,6 @@ with gr.Blocks() as demo:
     silence_threshold = gr.Number(label="Silence Threshold (dB)", value=-50)
     seek_step = gr.Slider(label="Seek Step (ms)", minimum=1, maximum=10, value=1)
     keep_silence = gr.Number(label="Keep Silence (ms)", value=200)
-    formant_shift = gr.Checkbox(label="Enable Formant Shift", value=False)
     quefrency = gr.Number(label="Quefrency", value=0)
     timbre = gr.Number(label="Timbre", value=1)
     f0_autotune = gr.Checkbox(label="Enable F0 Autotune", value=False)
@@ -112,7 +103,7 @@ with gr.Blocks() as demo:
     inputs=[model_name, audio_input, f0_change, f0_method, min_pitch, max_pitch,
             crepe_hop_length, index_rate, filter_radius, rms_mix_rate, protect,
            split_infer, min_silence, silence_threshold, seek_step, keep_silence,
-
+            quefrency, timbre, f0_autotune, output_format],
     outputs=output_audio)
 
 # Launch the demo
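The first hunk replaces the hard-coded working directory with a path derived from the script's own location. A minimal sketch of what the new expression evaluates to, under an assumed (hypothetical) directory layout:

import os

# Assume app.py sits at /home/user/app/src/app.py (hypothetical layout).
path = "/home/user/app/src/app.py"              # stand-in for os.path.abspath(__file__)
print(os.path.dirname(path))                    # /home/user/app/src
print(os.path.dirname(os.path.dirname(path)))   # /home/user/app  -> main_dir

# The removed os.chdir("") would have raised FileNotFoundError, since ""
# is not a valid directory. The new main_dir is always an existing absolute
# path; the diff adds no chdir call back, so presumably it is joined onto
# relative paths elsewhere in the app.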
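The second hunk also drops the preamble that wrote the incoming audio to uploaded_audio.wav via audio.save(...) and ran chmod on stftpitchshift. Depending on how audio_input is declared (not shown in this diff), a Gradio Audio component hands the callback either a filepath string (type="filepath") or a (sample_rate, ndarray) tuple (type="numpy"); neither object has a .save() method, which is consistent with removing that code. A hypothetical helper, not part of the commit, that would normalise either payload to a WAV path using the already-imported pydub:

from pydub import AudioSegment

def to_wav_path(audio, out_path="uploaded_audio.wav"):
    """Hypothetical helper: turn a Gradio Audio payload into a WAV file path."""
    if isinstance(audio, str):          # gr.Audio(type="filepath") -> path string
        return audio
    sample_rate, samples = audio        # gr.Audio(type="numpy") -> (sr, ndarray)
    segment = AudioSegment(
        samples.tobytes(),
        frame_rate=sample_rate,
        sample_width=samples.dtype.itemsize,
        channels=1 if samples.ndim == 1 else samples.shape[1],
    )
    segment.export(out_path, format="wav")
    return out_path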
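The final hunk re-aligns the inputs list of the button's click handler with the trimmed inference signature. Gradio Blocks event handlers pass component values to the callback positionally, so the inputs list must mirror the function's parameter order exactly; dropping formant_shift from the signature without also fixing this list would shift every later argument by one. A minimal, self-contained sketch of that positional mapping (toy components, not the app's own):

import gradio as gr

# The n-th component in `inputs` becomes the n-th argument of the callback,
# so the list below must stay in the same order as the function's parameters.
def echo(name, pitch_shift, autotune):
    return f"{name}: shift={pitch_shift}, autotune={autotune}"

with gr.Blocks() as demo:
    name = gr.Textbox(label="Model Name")
    pitch_shift = gr.Number(label="Pitch Shift", value=0)
    autotune = gr.Checkbox(label="Enable F0 Autotune", value=False)
    out = gr.Textbox(label="Result")
    run = gr.Button("Run")
    run.click(echo, inputs=[name, pitch_shift, autotune], outputs=out)

if __name__ == "__main__":
    demo.launch()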