Update app.py
app.py CHANGED
@@ -10,6 +10,8 @@ from models import SynthesizerTrn
 from text import text_to_sequence
 from mel_processing import spectrogram_torch
 
+limitation = True  # limit text and audio length
+
 
 def get_text(text, hps):
     text_norm = text_to_sequence(text, hps.symbols, hps.data.text_cleaners)
@@ -21,7 +23,7 @@ def get_text(text, hps):
 
 def create_tts_fn(model, hps, speaker_ids):
     def tts_fn(text, speaker, speed):
-        if len(text) > 150:
+        if limitation and len(text) > 150:
             return "Error: Text is too long", None
         speaker_id = speaker_ids[speaker]
         stn_tst = get_text(text, hps)
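
Note: the new module-level limitation flag gates both the 150-character text check in the hunk above and the 20-second audio check in the voice-conversion hunk below, so both caps can be switched off in one place when running outside the Space. A minimal sketch of the pattern; the helper name and error message are chosen here for illustration and are not part of app.py:

limitation = True  # limit text and audio length, as in the diff

def check_text_length(text, max_chars=150):
    # illustrative helper: return an error message, or None when the input passes
    if limitation and len(text) > max_chars:
        return f"Error: Text is too long (limit is {max_chars} characters)"
    return None
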
@@ -31,6 +33,7 @@ def create_tts_fn(model, hps, speaker_ids):
         sid = LongTensor([speaker_id])
         audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8,
                             length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy()
+        del stn_tst, x_tst, x_tst_lengths, sid
         return "Success", (hps.data.sampling_rate, audio)
 
     return tts_fn
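
Note: the added del statement drops the last references to the input tensors as soon as synthesis finishes, letting Python reclaim that memory before the next request, which matters on a CPU Space with a fixed RAM budget. A self-contained sketch of the same inference pattern; the model object and its infer signature are assumed from the diff, not defined here:

import torch

def synthesize(model, x_tst, x_tst_lengths, sid, speed=1.0):
    # run inference without recording an autograd graph
    with torch.no_grad():
        audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667,
                            noise_scale_w=0.8, length_scale=1.0 / speed)[0][0, 0]
        audio = audio.data.cpu().float().numpy()
    # drop the tensor references so the memory can be freed promptly
    del x_tst, x_tst_lengths, sid
    return audio
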
@@ -42,7 +45,7 @@ def create_vc_fn(model, hps, speaker_ids):
             return "You need to upload an audio", None
         sampling_rate, audio = input_audio
         duration = audio.shape[0] / sampling_rate
-        if duration >
+        if limitation and duration > 20:
             return "Error: Audio is too long", None
         original_speaker_id = speaker_ids[original_speaker]
         target_speaker_id = speaker_ids[target_speaker]
@@ -52,17 +55,18 @@ def create_vc_fn(model, hps, speaker_ids):
             audio = librosa.to_mono(audio.transpose(1, 0))
         if sampling_rate != hps.data.sampling_rate:
             audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate)
-        y = torch.FloatTensor(audio)
-        y = y.unsqueeze(0)
-        spec = spectrogram_torch(y, hps.data.filter_length,
-                                 hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length,
-                                 center=False)
-        spec_lengths = LongTensor([spec.size(-1)])
-        sid_src = LongTensor([original_speaker_id])
-        sid_tgt = LongTensor([target_speaker_id])
         with no_grad():
+            y = torch.FloatTensor(audio)
+            y = y.unsqueeze(0)
+            spec = spectrogram_torch(y, hps.data.filter_length,
+                                     hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length,
+                                     center=False)
+            spec_lengths = LongTensor([spec.size(-1)])
+            sid_src = LongTensor([original_speaker_id])
+            sid_tgt = LongTensor([target_speaker_id])
             audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][
                 0, 0].data.cpu().float().numpy()
+            del y, spec, spec_lengths, sid_src, sid_tgt
         return "Success", (hps.data.sampling_rate, audio)
 
     return vc_fn
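
Note: this hunk moves the whole spectrogram preparation under with no_grad(), so none of the tensor operations record gradients, and then frees the intermediates once voice conversion is done. A hedged sketch of the input preparation being wrapped; the helper name and the 22050 Hz default are placeholders, not values from the app's config:

import librosa
import torch

def prepare_waveform(audio, sampling_rate, target_sr=22050):
    # expects a float waveform (convert int16 PCM to float32 first);
    # mixes stereo down to mono, resamples to the model's rate, and shapes
    # the result as a (1, num_samples) tensor for spectrogram_torch
    if audio.ndim > 1:
        audio = librosa.to_mono(audio.transpose(1, 0))
    if sampling_rate != target_sr:
        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=target_sr)
    return torch.FloatTensor(audio).unsqueeze(0)
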
@@ -103,10 +107,10 @@ if __name__ == '__main__':
         with gr.Tabs():
             with gr.TabItem("TTS"):
                 with gr.Tabs():
-                    for i, (
+                    for i, (model_name, cover_path, speakers, tts_fn, vc_fn) in enumerate(models):
                         with gr.TabItem(f"model{i}"):
                             with gr.Column():
-                                gr.Markdown(f"## {
+                                gr.Markdown(f"## {model_name}\n\n"
                                             f"![cover](file/{cover_path})")
                                 tts_input1 = gr.TextArea(label="Text (150 words limitation)", value="γγγ«γ‘γ―γ")
                                 tts_input2 = gr.Dropdown(label="Speaker", choices=speakers,
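
Note: the rewritten loop unpacks five fields per entry of models and builds one tab per model, so each tab's widgets close over that model's own functions. A minimal runnable sketch of the per-model tab pattern; the models list here is a stand-in, not the app's real data:

import gradio as gr

models = [("demo model A",), ("demo model B",)]  # stand-in entries

with gr.Blocks() as app:
    with gr.Tabs():
        for i, (model_name,) in enumerate(models):
            with gr.TabItem(f"model{i}"):
                # one tab per model, titled by index as in the diff
                gr.Markdown(f"## {model_name}")
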
@@ -119,18 +123,19 @@ if __name__ == '__main__':
                                                  [tts_output1, tts_output2])
             with gr.TabItem("Voice Conversion"):
                 with gr.Tabs():
-                    for i, (
+                    for i, (model_name, cover_path, speakers, tts_fn, vc_fn) in enumerate(models):
                         with gr.TabItem(f"model{i}"):
-                            gr.Markdown(f"## {
+                            gr.Markdown(f"## {model_name}\n\n"
                                         f"![cover](file/{cover_path})")
                             vc_input1 = gr.Dropdown(label="Original Speaker", choices=speakers, type="index",
                                                     value=speakers[0])
                             vc_input2 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index",
                                                     value=speakers[1])
-                            vc_input3 = gr.Audio(label="Input Audio (
+                            vc_input3 = gr.Audio(label="Input Audio (20s limitation)")
                             vc_submit = gr.Button("Convert", variant="primary")
                             vc_output1 = gr.Textbox(label="Output Message")
                             vc_output2 = gr.Audio(label="Output Audio")
                             vc_submit.click(vc_fn, [vc_input1, vc_input2, vc_input3], [vc_output1, vc_output2])
 
-    app.launch()
+    # app.launch()
+    app.queue(client_position_to_load_data=10).launch()
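
Note: the last change comments out the plain app.launch() and launches with app.queue(...) instead, so long-running TTS and voice-conversion requests are queued and processed one at a time rather than overlapping on the shared CPU. The client_position_to_load_data keyword is copied from the diff; assuming the early Gradio 3.x queue API this Space appears to pin, it controls how near the front of the queue a client must be before its input payload is collected:

# queue requests before launching (keyword as in the diff, for the assumed
# Gradio 3.x queue API)
app.queue(client_position_to_load_data=10).launch()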