"""
If running this app in WSL2, you need to run the following command in the WSL2
terminal to get the IP address of the WSL2 instance:

    ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1
"""

import asyncio
import logging
import os
import random
import re
import sys

# Add the repo root and the GPT_SoVITS package directory to sys.path before the
# local imports below, which live under those paths.
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append("%s/GPT_SoVITS" % (now_dir))

import gradio as gr
import spaces
import torch

from tools.i18n.i18n import I18nAuto, scan_language_list
from TTS_infer_pack.text_segmentation_method import get_method
from TTS_infer_pack.TTS import TTS, TTS_Config

# Silence noisy third-party loggers.
logging.getLogger("markdown_it").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("httpcore").setLevel(logging.ERROR)
logging.getLogger("httpx").setLevel(logging.ERROR)
logging.getLogger("asyncio").setLevel(logging.ERROR)
logging.getLogger("charset_normalizer").setLevel(logging.ERROR)
logging.getLogger("torchaudio._extension").setLevel(logging.ERROR)


if "_CUDA_VISIBLE_DEVICES" in os.environ:
    os.environ["CUDA_VISIBLE_DEVICES"] = os.environ["_CUDA_VISIBLE_DEVICES"]

# Half-precision inference is used only when requested via the environment and CUDA is available.
is_half = os.environ.get("is_half", "True").lower() == "true" and torch.cuda.is_available()

gpt_path = os.environ.get("gpt_path", None)
sovits_path = os.environ.get("sovits_path", None)
cnhubert_base_path = os.environ.get("cnhubert_base_path", None)
bert_path = os.environ.get("bert_path", None)
version = os.environ.get("version", "v2")

language = os.environ.get("language", "Auto")
language = sys.argv[-1] if sys.argv[-1] in scan_language_list() else language
i18n = I18nAuto(language=language)

device = "cuda" if torch.cuda.is_available() else "cpu"

dict_language_v2 = {
    i18n("粵語"): "yue",
    # "中文" is included so the dropdown defaults below resolve to a valid key.
    i18n("中文"): "all_zh",
}
dict_language = dict_language_v2

# UI labels for the text-splitting strategies, mapped to the cut-method names
# understood by the TTS pipeline.
cut_method = {
    i18n("不切"): "cut0",
    i18n("凑四句一切"): "cut1",
    i18n("凑50字一切"): "cut2",
    i18n("按中文句号。切"): "cut3",
    i18n("按英文句号.切"): "cut4",
    i18n("按标点符号切"): "cut5",
}

tts_config = TTS_Config("GPT_SoVITS/configs/tts_infer.yaml")
tts_config.device = device
tts_config.is_half = is_half
tts_config.version = version
if gpt_path is not None:
    tts_config.t2s_weights_path = gpt_path
if sovits_path is not None:
    tts_config.vits_weights_path = sovits_path
if cnhubert_base_path is not None:
    # "cnhuhbert" is the attribute name as spelled in TTS_Config.
    tts_config.cnhuhbert_base_path = cnhubert_base_path
if bert_path is not None:
    tts_config.bert_base_path = bert_path

print(tts_config)
tts_pipeline = TTS(tts_config)
gpt_path = tts_config.t2s_weights_path
sovits_path = tts_config.vits_weights_path
version = tts_config.version


@spaces.GPU
def inference(
    text,
    text_lang,
    ref_audio_path,
    aux_ref_audio_paths,
    prompt_text,
    prompt_lang,
    top_k,
    top_p,
    temperature,
    text_split_method,
    batch_size,
    speed_factor,
    ref_text_free,
    split_bucket,
    fragment_interval,
    seed,
    keep_random,
    parallel_infer,
    repetition_penalty,
):
    """Run TTS inference and yield (audio, seed) pairs for the Gradio outputs."""
    seed = -1 if keep_random else seed
    actual_seed = seed if seed not in [-1, "", None] else random.randrange(1 << 32)
    inputs = {
        "text": text,
        "text_lang": dict_language[text_lang],
        "ref_audio_path": ref_audio_path,
        "aux_ref_audio_paths": [item.name for item in aux_ref_audio_paths]
        if aux_ref_audio_paths is not None
        else [],
        "prompt_text": prompt_text if not ref_text_free else "",
        "prompt_lang": dict_language[prompt_lang],
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "text_split_method": cut_method[text_split_method],
        "batch_size": int(batch_size),
        "speed_factor": float(speed_factor),
        "split_bucket": split_bucket,
        "return_fragment": False,
        "fragment_interval": fragment_interval,
        "seed": actual_seed,
        "parallel_infer": parallel_infer,
        "repetition_penalty": repetition_penalty,
    }
    for item in tts_pipeline.run(inputs):
        yield item, actual_seed


def custom_sort_key(s):
    # Split the string into digit and non-digit parts so that numbered model names
    # sort numerically rather than lexicographically.
    parts = re.split(r"(\d+)", s)
    parts = [int(part) if part.isdigit() else part for part in parts]
    return parts


def change_choices():
    SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)
    return {
        "choices": sorted(SoVITS_names, key=custom_sort_key),
        "__type__": "update",
    }, {"choices": sorted(GPT_names, key=custom_sort_key), "__type__": "update"}


pretrained_sovits_name = [
    "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s2G2333k.pth",
    "GPT_SoVITS/pretrained_models/s2G488k.pth",
]
pretrained_gpt_name = [
    "GPT_SoVITS/pretrained_models/gsv-v2final-pretrained/s1bert25hz-5kh-longer-epoch=12-step=369668.ckpt",
    "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt",
]
# Keep only the pretrained checkpoints that actually exist on disk.
_existing_gpt, _existing_sovits = [], []
for i in range(2):
    if os.path.exists(pretrained_gpt_name[i]):
        _existing_gpt.append(pretrained_gpt_name[i])
    if os.path.exists(pretrained_sovits_name[i]):
        _existing_sovits.append(pretrained_sovits_name[i])
pretrained_gpt_name, pretrained_sovits_name = _existing_gpt, _existing_sovits

SoVITS_weight_root = ["SoVITS_weights_v2", "SoVITS_weights"]
GPT_weight_root = ["GPT_weights_v2", "GPT_weights"]
for path in SoVITS_weight_root + GPT_weight_root:
    os.makedirs(path, exist_ok=True)


def get_weights_names(GPT_weight_root, SoVITS_weight_root):
    SoVITS_names = list(pretrained_sovits_name)
    for path in SoVITS_weight_root:
        for name in os.listdir(path):
            if name.endswith(".pth"):
                SoVITS_names.append("%s/%s" % (path, name))
    GPT_names = list(pretrained_gpt_name)
    for path in GPT_weight_root:
        for name in os.listdir(path):
            if name.endswith(".ckpt"):
                GPT_names.append("%s/%s" % (path, name))
    return SoVITS_names, GPT_names


SoVITS_names, GPT_names = get_weights_names(GPT_weight_root, SoVITS_weight_root)


def change_sovits_weights(sovits_path, prompt_language=None, text_language=None):
    tts_pipeline.init_vits_weights(sovits_path)
    global version, dict_language
    dict_language = dict_language_v2
    if prompt_language is not None and text_language is not None:
        if prompt_language in list(dict_language.keys()):
            prompt_text_update, prompt_language_update = (
                {"__type__": "update"},
                {"__type__": "update", "value": prompt_language},
            )
        else:
            prompt_text_update = {"__type__": "update", "value": ""}
            prompt_language_update = {"__type__": "update", "value": i18n("中文")}
        if text_language in list(dict_language.keys()):
            text_update, text_language_update = (
                {"__type__": "update"},
                {"__type__": "update", "value": text_language},
            )
        else:
            text_update = {"__type__": "update", "value": ""}
            text_language_update = {"__type__": "update", "value": i18n("中文")}
        return (
            {"__type__": "update", "choices": list(dict_language.keys())},
            {"__type__": "update", "choices": list(dict_language.keys())},
            prompt_text_update,
            prompt_language_update,
            text_update,
            text_language_update,
        )


async def create_app():
    with gr.Blocks(title="GPT-SoVITS 張悦楷") as app:
        gr.Markdown(
            value="""
# 張悦楷 GPT-SoVITS 語音合成器
"""
        )

        with gr.Column():
            gr.Markdown(value=i18n("模型切换"))
            with gr.Row():
                GPT_dropdown = gr.Dropdown(
                    label=i18n("GPT模型列表"),
                    choices=sorted(GPT_names, key=custom_sort_key),
                    value=gpt_path,
                    interactive=True,
                )
                SoVITS_dropdown = gr.Dropdown(
                    label=i18n("SoVITS模型列表"),
                    choices=sorted(SoVITS_names, key=custom_sort_key),
                    value=sovits_path,
                    interactive=True,
                )
                refresh_button = gr.Button(i18n("刷新模型路径"), variant="primary")
                refresh_button.click(
                    fn=change_choices,
                    inputs=[],
                    outputs=[SoVITS_dropdown, GPT_dropdown],
                )

        with gr.Row():
            with gr.Column():
                gr.Markdown(value=i18n("*请上传并填写参考信息"))
                with gr.Row():
                    inp_ref = gr.Audio(
                        label="上傳 3-10 秒長嘅參考音頻", type="filepath"
                    )
                    inp_refs = gr.File(
                        label=i18n("辅参考音频(可选多个,或不选)"),
                        file_count="multiple",
                    )
                prompt_text = gr.Textbox(
                    label=i18n("主参考音频的文本"), value="", lines=2
                )
                with gr.Row():
                    prompt_language = gr.Dropdown(
                        label=i18n("主参考音频的语种"),
                        choices=list(dict_language.keys()),
                        value=i18n("中文"),
                    )
                    with gr.Column():
                        ref_text_free = gr.Checkbox(
                            label=i18n(
                                "开启无参考文本模式。不填参考文本亦相当于开启。"
                            ),
                            value=False,
                            interactive=True,
                            show_label=True,
                        )
                        gr.Markdown(
                            i18n(
                                "使用无参考文本模式时建议使用微调的GPT,听不清参考音频说的啥(不晓得写啥)可以开,开启后无视填写的参考文本。"
                            )
                        )

            with gr.Column():
                gr.Markdown(value=i18n("*请填写需要合成的目标文本和语种模式"))
                text = gr.Textbox(
                    label=i18n("需要合成的文本"), value="", lines=20, max_lines=20
                )
                text_language = gr.Dropdown(
                    label=i18n("需要合成的文本的语种"),
                    choices=list(dict_language.keys()),
                    value=i18n("中文"),
                )

        with gr.Group():
            gr.Markdown(value=i18n("推理设置"))
            with gr.Row():
                with gr.Column():
                    batch_size = gr.Slider(
                        minimum=1,
                        maximum=200,
                        step=1,
                        label=i18n("batch_size"),
                        value=20,
                        interactive=True,
                    )
                    fragment_interval = gr.Slider(
                        minimum=0.01,
                        maximum=1,
                        step=0.01,
                        label=i18n("分段间隔(秒)"),
                        value=0.3,
                        interactive=True,
                    )
                    speed_factor = gr.Slider(
                        minimum=0.6,
                        maximum=1.65,
                        step=0.05,
                        label="speed_factor",
                        value=1.0,
                        interactive=True,
                    )
                    top_k = gr.Slider(
                        minimum=1,
                        maximum=100,
                        step=1,
                        label=i18n("top_k"),
                        value=5,
                        interactive=True,
                    )
                    top_p = gr.Slider(
                        minimum=0,
                        maximum=1,
                        step=0.05,
                        label=i18n("top_p"),
                        value=1,
                        interactive=True,
                    )
                    temperature = gr.Slider(
                        minimum=0,
                        maximum=1,
                        step=0.05,
                        label=i18n("temperature"),
                        value=1,
                        interactive=True,
                    )
                    repetition_penalty = gr.Slider(
                        minimum=0,
                        maximum=2,
                        step=0.05,
                        label=i18n("重复惩罚"),
                        value=1.35,
                        interactive=True,
                    )
                with gr.Column():
                    with gr.Row():
                        how_to_cut = gr.Dropdown(
                            label=i18n("怎么切"),
                            choices=[
                                i18n("不切"),
                                i18n("凑四句一切"),
                                i18n("凑50字一切"),
                                i18n("按中文句号。切"),
                                i18n("按英文句号.切"),
                                i18n("按标点符号切"),
                            ],
                            value=i18n("凑四句一切"),
                            interactive=True,
                            scale=1,
                        )
                        parallel_infer = gr.Checkbox(
                            label=i18n("并行推理"),
                            value=True,
                            interactive=True,
                            show_label=True,
                        )
                        split_bucket = gr.Checkbox(
                            label=i18n("数据分桶(并行推理时会降低一点计算量)"),
                            value=True,
                            interactive=True,
                            show_label=True,
                        )

                    with gr.Row():
                        seed = gr.Number(label=i18n("随机种子"), value=-1)
                        keep_random = gr.Checkbox(
                            label=i18n("保持随机"),
                            value=True,
                            interactive=True,
                            show_label=True,
                        )

                    output = gr.Audio(label=i18n("输出的语音"))
                    with gr.Row():
                        inference_button = gr.Button(
                            i18n("合成语音"), variant="primary"
                        )
                        stop_infer = gr.Button(i18n("终止合成"), variant="primary")

        inference_button.click(
            inference,
            [
                text,
                text_language,
                inp_ref,
                inp_refs,
                prompt_text,
                prompt_language,
                top_k,
                top_p,
                temperature,
                how_to_cut,
                batch_size,
                speed_factor,
                ref_text_free,
                split_bucket,
                fragment_interval,
                seed,
                keep_random,
                parallel_infer,
                repetition_penalty,
            ],
            [output, seed],
        )
        stop_infer.click(tts_pipeline.stop, [], [])
        SoVITS_dropdown.change(
            change_sovits_weights,
            [SoVITS_dropdown, prompt_language, text_language],
            [
                prompt_language,
                text_language,
                prompt_text,
                prompt_language,
                text,
                text_language,
            ],
        )
        GPT_dropdown.change(tts_pipeline.init_t2s_weights, [GPT_dropdown], [])

    return app


if __name__ == "__main__":
    app = asyncio.run(create_app())
    app.launch()