Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
add models
Browse files- app.py +88 -73
- saved_model/0/cover.jpg +3 -0
- saved_model/1/cover.jpg +3 -0
- saved_model/2/config.json +3 -0
- saved_model/2/cover.jpg +3 -0
- saved_model/2/model.pth +3 -0
- saved_model/3/config.json +3 -0
- saved_model/3/cover.jpg +3 -0
- saved_model/3/model.pth +3 -0
- saved_model/names.json +3 -0
app.py
CHANGED
@@ -1,3 +1,4 @@
|
|
|
|
1 |
import os
|
2 |
|
3 |
os.system('cd monotonic_align && python setup.py build_ext --inplace && cd ..')
|
@@ -22,62 +23,64 @@ def get_text(text, hps):
|
|
22 |
return text_norm
|
23 |
|
24 |
|
25 |
-
def
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
|
|
|
|
71 |
|
72 |
|
73 |
if __name__ == '__main__':
|
74 |
models = []
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
for i in range(0, 2):
|
79 |
config_path = f"saved_model/{i}/config.json"
|
80 |
model_path = f"saved_model/{i}/model.pth"
|
|
|
81 |
hps = utils.get_hparams_from_file(config_path)
|
82 |
model = SynthesizerTrn(
|
83 |
len(hps.symbols),
|
@@ -87,38 +90,50 @@ if __name__ == '__main__':
|
|
87 |
**hps.model)
|
88 |
utils.load_checkpoint(model_path, model, None)
|
89 |
model.eval()
|
90 |
-
|
91 |
-
speakers =
|
92 |
-
|
93 |
-
|
|
|
94 |
|
95 |
app = gr.Blocks()
|
96 |
|
97 |
with app:
|
98 |
gr.Markdown("# Moe Japanese TTS And Voice Conversion Using VITS Model\n\n"
|
99 |
"![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.moegoe)\n\n"
|
100 |
-
"unofficial demo for
|
|
|
|
|
101 |
)
|
102 |
with gr.Tabs():
|
103 |
with gr.TabItem("TTS"):
|
104 |
-
with gr.
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
110 |
with gr.TabItem("Voice Conversion"):
|
111 |
-
with gr.
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
|
|
|
|
123 |
|
124 |
app.launch()
|
|
|
1 |
+
import json
import os

# Build the monotonic_align Cython extension in place before anything else runs;
# presumably the VITS model code imported below requires it — TODO confirm.
os.system('cd monotonic_align && python setup.py build_ext --inplace && cd ..')
|
|
|
23 |
return text_norm
|
24 |
|
25 |
|
26 |
+
def create_tts_fn(model, hps, speaker_ids):
    """Bind one loaded VITS model into a Gradio TTS callback.

    The returned callable takes (text, speaker) — where *speaker* is the
    dropdown index — and yields (status message, (sample_rate, audio)) or
    (error message, None) when the text exceeds the length cap.
    """
    def tts_fn(text, speaker):
        # Reject overly long requests up front instead of running inference.
        if len(text) > 150:
            return "Error: Text is too long", None
        sid_value = speaker_ids[speaker]
        sequence = get_text(text, hps)
        with no_grad():
            inputs = sequence.unsqueeze(0)
            input_lengths = LongTensor([sequence.size(0)])
            sid = LongTensor([sid_value])
            result = model.infer(inputs, input_lengths, sid=sid,
                                 noise_scale=.667, noise_scale_w=0.8,
                                 length_scale=1)[0][0, 0]
            audio = result.data.cpu().float().numpy()
        return "Success", (hps.data.sampling_rate, audio)

    return tts_fn
|
42 |
+
|
43 |
+
|
44 |
+
def create_vc_fn(model, hps, speaker_ids):
    """Bind one loaded VITS model into a Gradio voice-conversion callback.

    The returned callable takes (original_speaker, target_speaker, input_audio)
    — speakers are dropdown indices, input_audio is the (sampling_rate, ndarray)
    tuple produced by gr.Audio — and yields (status message, (sample_rate, audio))
    or (error message, None) on bad/oversized input.
    """
    def vc_fn(original_speaker, target_speaker, input_audio):
        if input_audio is None:
            return "You need to upload an audio", None
        sampling_rate, audio = input_audio
        duration = audio.shape[0] / sampling_rate
        # Cap conversion length to keep CPU inference time reasonable.
        if duration > 30:
            return "Error: Audio is too long", None
        original_speaker_id = speaker_ids[original_speaker]
        target_speaker_id = speaker_ids[target_speaker]

        # gr.Audio usually delivers integer PCM, but may hand over a float
        # array, on which np.iinfo raises ValueError — only rescale integral
        # dtypes to [-1, 1]; float input is assumed already normalized.
        if np.issubdtype(audio.dtype, np.integer):
            audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
        else:
            audio = audio.astype(np.float32)
        # Downmix multi-channel input (channels-last from gradio) to mono.
        if len(audio.shape) > 1:
            audio = librosa.to_mono(audio.transpose(1, 0))
        if sampling_rate != hps.data.sampling_rate:
            audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate)
        y = torch.FloatTensor(audio)
        y = y.unsqueeze(0)
        spec = spectrogram_torch(y, hps.data.filter_length,
                                 hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length,
                                 center=False)
        spec_lengths = LongTensor([spec.size(-1)])
        sid_src = LongTensor([original_speaker_id])
        sid_tgt = LongTensor([target_speaker_id])
        with no_grad():
            audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][
                0, 0].data.cpu().float().numpy()
        return "Success", (hps.data.sampling_rate, audio)

    return vc_fn
|
74 |
|
75 |
|
76 |
if __name__ == '__main__':
|
77 |
models = []
|
78 |
+
with open("saved_model/names.json", "r", encoding="utf-8") as f:
|
79 |
+
models_names = json.load(f)
|
80 |
+
for i, models_name in models_names.items():
|
|
|
81 |
config_path = f"saved_model/{i}/config.json"
|
82 |
model_path = f"saved_model/{i}/model.pth"
|
83 |
+
cover_path = f"saved_model/{i}/cover.jpg"
|
84 |
hps = utils.get_hparams_from_file(config_path)
|
85 |
model = SynthesizerTrn(
|
86 |
len(hps.symbols),
|
|
|
90 |
**hps.model)
|
91 |
utils.load_checkpoint(model_path, model, None)
|
92 |
model.eval()
|
93 |
+
speaker_ids = [sid for sid, name in enumerate(hps.speakers) if name != "None"]
|
94 |
+
speakers = [name for sid, name in enumerate(hps.speakers) if name != "None"]
|
95 |
+
|
96 |
+
models.append((models_name, cover_path, speakers,
|
97 |
+
create_tts_fn(model, hps, speaker_ids), create_vc_fn(model, hps, speaker_ids)))
|
98 |
|
99 |
app = gr.Blocks()
|
100 |
|
101 |
with app:
|
102 |
gr.Markdown("# Moe Japanese TTS And Voice Conversion Using VITS Model\n\n"
|
103 |
"![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.moegoe)\n\n"
|
104 |
+
"unofficial demo for \n\n"
|
105 |
+
"- [https://github.com/CjangCjengh/MoeGoe](https://github.com/CjangCjengh/MoeGoe)\n"
|
106 |
+
"- [https://github.com/Francis-Komizu/VITS](https://github.com/Francis-Komizu/VITS)"
|
107 |
)
|
108 |
with gr.Tabs():
|
109 |
with gr.TabItem("TTS"):
|
110 |
+
with gr.Tabs():
|
111 |
+
for i, (models_name, cover_path, speakers, tts_fn, vc_fn) in enumerate(models):
|
112 |
+
with gr.TabItem(f"model{i}"):
|
113 |
+
with gr.Column():
|
114 |
+
gr.Markdown(f"## {models_name}\n\n"
|
115 |
+
f"![cover](file/{cover_path})")
|
116 |
+
tts_input1 = gr.TextArea(label="Text (150 words limitation)", value="γγγ«γ‘γ―γ")
|
117 |
+
tts_input2 = gr.Dropdown(label="Speaker", choices=speakers,
|
118 |
+
type="index", value=speakers[0])
|
119 |
+
tts_submit = gr.Button("Generate", variant="primary")
|
120 |
+
tts_output1 = gr.Textbox(label="Output Message")
|
121 |
+
tts_output2 = gr.Audio(label="Output Audio")
|
122 |
+
tts_submit.click(tts_fn, [tts_input1, tts_input2], [tts_output1, tts_output2])
|
123 |
with gr.TabItem("Voice Conversion"):
|
124 |
+
with gr.Tabs():
|
125 |
+
for i, (models_name, cover_path, speakers, tts_fn, vc_fn) in enumerate(models):
|
126 |
+
with gr.TabItem(f"model{i}"):
|
127 |
+
gr.Markdown(f"## {models_name}\n\n"
|
128 |
+
f"![cover](file/{cover_path})")
|
129 |
+
vc_input1 = gr.Dropdown(label="Original Speaker", choices=speakers, type="index",
|
130 |
+
value=speakers[0])
|
131 |
+
vc_input2 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index",
|
132 |
+
value=speakers[1])
|
133 |
+
vc_input3 = gr.Audio(label="Input Audio (30s limitation)")
|
134 |
+
vc_submit = gr.Button("Convert", variant="primary")
|
135 |
+
vc_output1 = gr.Textbox(label="Output Message")
|
136 |
+
vc_output2 = gr.Audio(label="Output Audio")
|
137 |
+
vc_submit.click(vc_fn, [vc_input1, vc_input2, vc_input3], [vc_output1, vc_output2])
|
138 |
|
139 |
app.launch()
|
saved_model/0/cover.jpg
ADDED
Git LFS Details
|
saved_model/1/cover.jpg
ADDED
Git LFS Details
|
saved_model/2/config.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:cf80c1c1a185384ea9c4c8ea63964e2fc592e6d2aad3f8566b534a512ed90c28
|
3 |
+
size 1294
|
saved_model/2/cover.jpg
ADDED
Git LFS Details
|
saved_model/2/model.pth
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:16adcc6dd3f23ac4407176769f1e6843f86a5b16e04b8abb5a6a11132e6b9751
|
3 |
+
size 476622149
|
saved_model/3/config.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8507d589dd869adbc8f2f49f083930db85561184afaa9dc472006434e4bb1d7e
|
3 |
+
size 1246
|
saved_model/3/cover.jpg
ADDED
Git LFS Details
|
saved_model/3/model.pth
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:60dfd6e56a1f895e3db4c054fd94d5a0362103dd5d2e19941e17dd1be41e6b11
|
3 |
+
size 476796721
|
saved_model/names.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9ea53575cc490320713273241bc46b30c0cb399fa1e49647a5c7e13cf5e2e444
|
3 |
+
size 201
|