import os
import sys

from dotenv import load_dotenv

now_dir = os.getcwd()
sys.path.append(now_dir)
load_dotenv()
load_dotenv("sha256.env")

if sys.platform == "darwin":
    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

from infer.modules.vc import VC, show_info, hash_similarity
from infer.modules.uvr5.modules import uvr
from infer.lib.train.process_ckpt import (
    change_info,
    extract_small_model,
    merge,
)
from i18n.i18n import I18nAuto
from configs import Config
from sklearn.cluster import MiniBatchKMeans
import torch
import platform
import numpy as np
import gradio as gr
import faiss
import pathlib
import json
from time import sleep
from subprocess import Popen
from random import shuffle
import warnings
import traceback
import threading
import shutil
import logging

logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("httpx").setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

tmp = os.path.join(now_dir, "TEMP")
shutil.rmtree(tmp, ignore_errors=True)
os.makedirs(tmp, exist_ok=True)
os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True)
os.makedirs(os.path.join(now_dir, "assets/weights"), exist_ok=True)
os.environ["TEMP"] = tmp
warnings.filterwarnings("ignore")
torch.manual_seed(114514)

config = Config()
vc = VC(config)

if not config.nocheck:
    from infer.lib.rvcmd import check_all_assets, download_all_assets

    if not check_all_assets(update=config.update):
        if config.update:
            download_all_assets(tmpdir=tmp)
            if not check_all_assets(update=config.update):
                logging.error("could not satisfy all assets needed.")
                exit(1)
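
# Note: the block below patches fairseq's GradMultiply.forward to an identity
# clone-and-detach. This appears to be a DirectML workaround (the custom
# gradient-scaling op is not supported on DML devices); the side effect is
# that gradient scaling is simply skipped when config.dml is set.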
if config.dml:

    def forward_dml(ctx, x, scale):
        ctx.scale = scale
        res = x.clone().detach()
        return res

    import fairseq

    fairseq.modules.grad_multiply.GradMultiply.forward = forward_dml

i18n = I18nAuto()
logger.info(i18n)

# Detect CUDA GPUs and keep only those whose name matches a whitelist of
# families considered fast enough for training.
ngpu = torch.cuda.device_count()
gpu_infos = []
mem = []
if_gpu_ok = False

if torch.cuda.is_available() or ngpu != 0:
    for i in range(ngpu):
        gpu_name = torch.cuda.get_device_name(i)
        if any(
            value in gpu_name.upper()
            for value in [
                "10", "16", "20", "30", "40",
                "A2", "A3", "A4", "P4", "A50", "500", "A60",
                "70", "80", "90", "M4", "T4", "TITAN",
                "4060", "L", "6000",
            ]
        ):
            # At least one usable GPU was found.
            if_gpu_ok = True
            gpu_infos.append("%s\t%s" % (i, gpu_name))
            mem.append(
                int(
                    torch.cuda.get_device_properties(i).total_memory
                    / 1024
                    / 1024
                    / 1024
                    + 0.4
                )
            )
if if_gpu_ok and len(gpu_infos) > 0:
    gpu_info = "\n".join(gpu_infos)
    default_batch_size = min(mem) // 2
else:
    gpu_info = i18n(
        "Unfortunately, there is no compatible GPU available to support your training."
    )
    default_batch_size = 1
# Each entry is "index\tname"; its first character is the device index
# (assumes fewer than 10 GPUs).
gpus = "-".join([i[0] for i in gpu_infos])
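
# Worked example of the batch-size heuristic above: a card reporting
# 11.72 GiB of VRAM rounds to int(11.72 + 0.4) = 12, giving
# default_batch_size = 12 // 2 = 6. With mixed cards the smallest wins,
# e.g. min([24, 12]) // 2 = 6.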

weight_root = os.getenv("weight_root")
weight_uvr5_root = os.getenv("weight_uvr5_root")
index_root = os.getenv("index_root")
outside_index_root = os.getenv("outside_index_root")

names = []
for name in os.listdir(weight_root):
    if name.endswith(".pth"):
        names.append(name)
index_paths = []


def lookup_indices(index_root):
    global index_paths
    for root, dirs, files in os.walk(index_root, topdown=False):
        for name in files:
            if name.endswith(".index") and "trained" not in name:
                index_paths.append("%s/%s" % (root, name))


lookup_indices(index_root)
lookup_indices(outside_index_root)
uvr5_names = []
for name in os.listdir(weight_uvr5_root):
    if name.endswith(".pth") or "onnx" in name:
        uvr5_names.append(name.replace(".pth", ""))


def change_choices():
    names = []
    for name in os.listdir(weight_root):
        if name.endswith(".pth"):
            names.append(name)
    index_paths = []
    for root, dirs, files in os.walk(index_root, topdown=False):
        for name in files:
            if name.endswith(".index") and "trained" not in name:
                index_paths.append("%s/%s" % (root, name))
    return {"choices": sorted(names), "__type__": "update"}, {
        "choices": sorted(index_paths),
        "__type__": "update",
    }


def clean():
    return {"value": "", "__type__": "update"}
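
# The {"__type__": "update"} dicts returned above are Gradio's (3.x-era)
# dictionary form of component updates: returning one from an event handler
# patches the listed properties of the bound output component. A minimal
# self-contained sketch (hypothetical wiring, not part of this app):
#
#     import gradio as gr
#
#     def refresh():
#         return {"choices": ["a.pth", "b.pth"], "__type__": "update"}
#
#     with gr.Blocks() as demo:
#         model = gr.Dropdown(choices=[])
#         gr.Button("Refresh").click(refresh, outputs=[model])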


def export_onnx(ModelPath, ExportedPath):
    from rvc.onnx import export_onnx as eo

    eo(ModelPath, ExportedPath)


sr_dict = {
    "32k": 32000,
    "40k": 40000,
    "48k": 48000,
}
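
# Hypothetical usage of the helpers above (paths are illustrative only):
#
#     export_onnx("assets/weights/my-voice.pth", "exports/my-voice.onnx")
#     sr = sr_dict["40k"]  # -> 40000 Hz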


def if_done(done, p):
    # Poll a single subprocess; flip the shared flag once it has exited.
    while 1:
        if p.poll() is None:
            sleep(0.5)
        else:
            break
    done[0] = True


def if_done_multi(done, ps):
    # Poll a list of subprocesses; flip the shared flag once all have exited.
    while 1:
        flag = 1
        for p in ps:
            if p.poll() is None:
                flag = 0
                sleep(0.5)
                break
        if flag == 1:
            break
    done[0] = True
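
# `done` is a one-element list rather than a plain bool so the watcher thread
# can mutate it in place while the caller polls it. The pattern in isolation
# (a rough sketch, using a throwaway subprocess):
#
#     import subprocess
#     import threading
#     import time
#
#     done = [False]
#     p = subprocess.Popen([sys.executable, "-c", "pass"])
#     threading.Thread(target=if_done, args=(done, p)).start()
#     while not done[0]:
#         time.sleep(0.5)  # stream logs, update the UI, etc.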


def preprocess_dataset(trainset_dir, exp_dir, sr, n_p):
    sr = sr_dict[sr]
    os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
    # Truncate the preprocess log before the run starts.
    f = open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "w")
    f.close()
    cmd = '"%s" infer/modules/train/preprocess.py "%s" %s %s "%s/logs/%s" %s %.1f' % (
        config.python_cmd,
        trainset_dir,
        sr,
        n_p,
        now_dir,
        exp_dir,
        config.noparallel,
        config.preprocess_per,
    )
    logger.info("Execute: " + cmd)
    p = Popen(cmd, shell=True)

    # Watch the subprocess from a thread while streaming the log to the UI.
    done = [False]
    threading.Thread(
        target=if_done,
        args=(done, p),
    ).start()
    while 1:
        with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
            yield (f.read())
        sleep(1)
        if done[0]:
            break
    with open("%s/logs/%s/preprocess.log" % (now_dir, exp_dir), "r") as f:
        log = f.read()
    logger.info(log)
    yield log
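
# preprocess_dataset is a generator: each yield hands the current contents of
# preprocess.log to Gradio, which re-renders the bound output component, so
# the UI shows the subprocess log growing roughly once per second.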


def extract_f0_feature(gpus, n_p, f0method, if_f0, exp_dir, version19, gpus_rmvpe):
    gpus = gpus.split("-")
    os.makedirs("%s/logs/%s" % (now_dir, exp_dir), exist_ok=True)
    # Truncate the shared extraction log before the run starts.
    f = open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "w")
    f.close()
    if if_f0:
        if f0method != "rmvpe_gpu":
            cmd = (
                '"%s" infer/modules/train/extract/extract_f0_print.py "%s/logs/%s" %s %s'
                % (
                    config.python_cmd,
                    now_dir,
                    exp_dir,
                    n_p,
                    f0method,
                )
            )
            logger.info("Execute: " + cmd)
            p = Popen(cmd, shell=True, cwd=now_dir)

            done = [False]
            threading.Thread(
                target=if_done,
                args=(done, p),
            ).start()
        else:
            if gpus_rmvpe != "-":
                # Shard RMVPE pitch extraction across GPUs: part idx of leng
                # runs on device n_g.
                gpus_rmvpe = gpus_rmvpe.split("-")
                leng = len(gpus_rmvpe)
                ps = []
                for idx, n_g in enumerate(gpus_rmvpe):
                    cmd = (
                        '"%s" infer/modules/train/extract/extract_f0_rmvpe.py %s %s %s "%s/logs/%s" %s '
                        % (
                            config.python_cmd,
                            leng,
                            idx,
                            n_g,
                            now_dir,
                            exp_dir,
                            config.is_half,
                        )
                    )
                    logger.info("Execute: " + cmd)
                    p = Popen(cmd, shell=True, cwd=now_dir)
                    ps.append(p)

                done = [False]
                threading.Thread(
                    target=if_done_multi,
                    args=(done, ps),
                ).start()
            else:
                # DirectML path: run synchronously on a single device.
                cmd = (
                    config.python_cmd
                    + ' infer/modules/train/extract/extract_f0_rmvpe_dml.py "%s/logs/%s" '
                    % (now_dir, exp_dir)
                )
                logger.info("Execute: " + cmd)
                p = Popen(cmd, shell=True, cwd=now_dir)
                p.wait()
                done = [True]
        while 1:
            with open(
                "%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r"
            ) as f:
                yield (f.read())
            sleep(1)
            if done[0]:
                break
        with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
            log = f.read()
        logger.info(log)
        yield log
    # Feature extraction: extract_feature_print.py receives the device, the
    # number of parts, this part's index, the GPU id, the log dir, the model
    # version, and the half-precision flag (see cmd below).
    leng = len(gpus)
    ps = []
    for idx, n_g in enumerate(gpus):
        cmd = (
            '"%s" infer/modules/train/extract_feature_print.py %s %s %s %s "%s/logs/%s" %s %s'
            % (
                config.python_cmd,
                config.device,
                leng,
                idx,
                n_g,
                now_dir,
                exp_dir,
                version19,
                config.is_half,
            )
        )
        logger.info("Execute: " + cmd)
        p = Popen(cmd, shell=True, cwd=now_dir)
        ps.append(p)

    done = [False]
    threading.Thread(
        target=if_done_multi,
        args=(done, ps),
    ).start()
    while 1:
        with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
            yield (f.read())
        sleep(1)
        if done[0]:
            break
    with open("%s/logs/%s/extract_f0_feature.log" % (now_dir, exp_dir), "r") as f:
        log = f.read()
    logger.info(log)
    yield log
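
# Sharding example for the loops above: gpus_rmvpe = "0-0-1" splits pitch
# extraction into three parts, two on GPU 0 and one on GPU 1. Each subprocess
# receives (total_parts, part_index, gpu_id) and is expected to take every
# total_parts-th file starting at part_index (a stride slice of the sorted
# file list, judging by the extractor scripts).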


def get_pretrained_models(path_str, f0_str, sr2):
    if_pretrained_generator_exist = os.access(
        "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2), os.F_OK
    )
    if_pretrained_discriminator_exist = os.access(
        "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2), os.F_OK
    )
    if not if_pretrained_generator_exist:
        logger.warning(
            "assets/pretrained%s/%sG%s.pth does not exist; the pretrained generator will not be used",
            path_str,
            f0_str,
            sr2,
        )
    if not if_pretrained_discriminator_exist:
        logger.warning(
            "assets/pretrained%s/%sD%s.pth does not exist; the pretrained discriminator will not be used",
            path_str,
            f0_str,
            sr2,
        )
    return (
        (
            "assets/pretrained%s/%sG%s.pth" % (path_str, f0_str, sr2)
            if if_pretrained_generator_exist
            else ""
        ),
        (
            "assets/pretrained%s/%sD%s.pth" % (path_str, f0_str, sr2)
            if if_pretrained_discriminator_exist
            else ""
        ),
    )
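
# Path resolution example for the helper above: path_str="_v2", f0_str="f0",
# sr2="40k" resolves to "assets/pretrained_v2/f0G40k.pth" and
# "assets/pretrained_v2/f0D40k.pth".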


def change_sr2(sr2, if_f0_3, version19):
    path_str = "" if version19 == "v1" else "_v2"
    f0_str = "f0" if if_f0_3 else ""
    return get_pretrained_models(path_str, f0_str, sr2)


def change_version19(sr2, if_f0_3, version19):
    path_str = "" if version19 == "v1" else "_v2"
    # 32k is not offered for v1 in this UI, so fall back to 40k when switching.
    if sr2 == "32k" and version19 == "v1":
        sr2 = "40k"
    to_return_sr2 = (
        {"choices": ["40k", "48k"], "__type__": "update", "value": sr2}
        if version19 == "v1"
        else {"choices": ["40k", "48k", "32k"], "__type__": "update", "value": sr2}
    )
    f0_str = "f0" if if_f0_3 else ""
    return (
        *get_pretrained_models(path_str, f0_str, sr2),
        to_return_sr2,
    )


def change_f0(if_f0_3, sr2, version19):
    path_str = "" if version19 == "v1" else "_v2"
    return (
        {"visible": if_f0_3, "__type__": "update"},
        {"visible": if_f0_3, "__type__": "update"},
        *get_pretrained_models(path_str, "f0" if if_f0_3 else "", sr2),
    )


def click_train(
    exp_dir1,
    sr2,
    if_f0_3,
    spk_id5,
    save_epoch10,
    total_epoch11,
    batch_size12,
    if_save_latest13,
    pretrained_G14,
    pretrained_D15,
    gpus16,
    if_cache_gpu17,
    if_save_every_weights18,
    version19,
    author,
):
    # Build the training filelist from the preprocessed artifacts.
    exp_dir = "%s/logs/%s" % (now_dir, exp_dir1)
    os.makedirs(exp_dir, exist_ok=True)
    gt_wavs_dir = "%s/0_gt_wavs" % (exp_dir)
    feature_dir = (
        "%s/3_feature256" % (exp_dir)
        if version19 == "v1"
        else "%s/3_feature768" % (exp_dir)
    )
    if if_f0_3:
        f0_dir = "%s/2a_f0" % (exp_dir)
        f0nsf_dir = "%s/2b-f0nsf" % (exp_dir)
        # Keep only samples for which every artifact exists.
        names = (
            set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)])
            & set([name.split(".")[0] for name in os.listdir(feature_dir)])
            & set([name.split(".")[0] for name in os.listdir(f0_dir)])
            & set([name.split(".")[0] for name in os.listdir(f0nsf_dir)])
        )
    else:
        names = set([name.split(".")[0] for name in os.listdir(gt_wavs_dir)]) & set(
            [name.split(".")[0] for name in os.listdir(feature_dir)]
        )
    opt = []
    for name in names:
        if if_f0_3:
            opt.append(
                "%s/%s.wav|%s/%s.npy|%s/%s.wav.npy|%s/%s.wav.npy|%s"
                % (
                    gt_wavs_dir.replace("\\", "\\\\"),
                    name,
                    feature_dir.replace("\\", "\\\\"),
                    name,
                    f0_dir.replace("\\", "\\\\"),
                    name,
                    f0nsf_dir.replace("\\", "\\\\"),
                    name,
                    spk_id5,
                )
            )
        else:
            opt.append(
                "%s/%s.wav|%s/%s.npy|%s"
                % (
                    gt_wavs_dir.replace("\\", "\\\\"),
                    name,
                    feature_dir.replace("\\", "\\\\"),
                    name,
                    spk_id5,
                )
            )
    fea_dim = 256 if version19 == "v1" else 768
    # Append two copies of the bundled mute sample so silence is represented
    # in the training set.
    if if_f0_3:
        for _ in range(2):
            opt.append(
                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s/logs/mute/2a_f0/mute.wav.npy|%s/logs/mute/2b-f0nsf/mute.wav.npy|%s"
                % (now_dir, sr2, now_dir, fea_dim, now_dir, now_dir, spk_id5)
            )
    else:
        for _ in range(2):
            opt.append(
                "%s/logs/mute/0_gt_wavs/mute%s.wav|%s/logs/mute/3_feature%s/mute.npy|%s"
                % (now_dir, sr2, now_dir, fea_dim, spk_id5)
            )
    shuffle(opt)
    with open("%s/filelist.txt" % exp_dir, "w") as f:
        f.write("\n".join(opt))
    logger.debug("Write filelist done")
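
    # Example filelist row in the f0 case (hypothetical experiment "demo",
    # speaker id 0); a single pipe-separated line, wrapped here for
    # readability:
    #   .../logs/demo/0_gt_wavs/1_0.wav|.../logs/demo/3_feature768/1_0.npy
    #   |.../logs/demo/2a_f0/1_0.wav.npy|.../logs/demo/2b-f0nsf/1_0.wav.npy|0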
logger.info("Use gpus: %s", str(gpus16)) |
|
if pretrained_G14 == "": |
|
logger.info("No pretrained Generator") |
|
if pretrained_D15 == "": |
|
logger.info("No pretrained Discriminator") |
|
if version19 == "v1" or sr2 == "40k": |
|
config_path = "v1/%s.json" % sr2 |
|
else: |
|
config_path = "v2/%s.json" % sr2 |
|
config_save_path = os.path.join(exp_dir, "config.json") |
|
if not pathlib.Path(config_save_path).exists(): |
|
with open(config_save_path, "w", encoding="utf-8") as f: |
|
json.dump( |
|
config.json_config[config_path], |
|
f, |
|
ensure_ascii=False, |
|
indent=4, |
|
sort_keys=True, |
|
) |
|
f.write("\n") |
|
cmd = ( |
|
'"%s" infer/modules/train/train.py -e "%s" -sr %s -f0 %s -bs %s -te %s -se %s %s %s -l %s -c %s -sw %s -v %s -a "%s"' |
|
% ( |
|
config.python_cmd, |
|
exp_dir1, |
|
sr2, |
|
1 if if_f0_3 else 0, |
|
batch_size12, |
|
total_epoch11, |
|
save_epoch10, |
|
'-pg "%s"' % pretrained_G14 if pretrained_G14 != "" else "", |
|
'-pd "%s"' % pretrained_D15 if pretrained_D15 != "" else "", |
|
1 if if_save_latest13 == i18n("Yes") else 0, |
|
1 if if_cache_gpu17 == i18n("Yes") else 0, |
|
1 if if_save_every_weights18 == i18n("Yes") else 0, |
|
version19, |
|
author, |
|
) |
|
) |
|
if gpus16: |
|
cmd += ' -g "%s"' % (gpus16) |
|
|
|
logger.info("Execute: " + cmd) |
|
p = Popen(cmd, shell=True, cwd=now_dir) |
|
p.wait() |
|
return "Training complete. You can check the training logs in the console or the 'train.log' file under the experiment folder." |
|
|
|
|
|
|
|


def train_index(exp_dir1, version19):
    exp_dir = "logs/%s" % (exp_dir1)
    os.makedirs(exp_dir, exist_ok=True)
    feature_dir = (
        "%s/3_feature256" % (exp_dir)
        if version19 == "v1"
        else "%s/3_feature768" % (exp_dir)
    )
    if not os.path.exists(feature_dir):
        return "Please run feature extraction first!"
    listdir_res = list(os.listdir(feature_dir))
    if len(listdir_res) == 0:
        return "Please run feature extraction first!"
    infos = []
    npys = []
    for name in sorted(listdir_res):
        phone = np.load("%s/%s" % (feature_dir, name))
        npys.append(phone)
    big_npy = np.concatenate(npys, 0)
    big_npy_idx = np.arange(big_npy.shape[0])
    np.random.shuffle(big_npy_idx)
    big_npy = big_npy[big_npy_idx]
    if big_npy.shape[0] > 2e5:
        # Too many feature frames for a reasonable index; compress them to
        # 10k k-means centroids first.
        infos.append(
            "Running k-means on %s feature frames to reduce them to 10k centers."
            % big_npy.shape[0]
        )
        yield "\n".join(infos)
        try:
            big_npy = (
                MiniBatchKMeans(
                    n_clusters=10000,
                    verbose=True,
                    batch_size=256 * config.n_cpu,
                    compute_labels=False,
                    init="random",
                )
                .fit(big_npy)
                .cluster_centers_
            )
        except:
            info = traceback.format_exc()
            logger.info(info)
            infos.append(info)
            yield "\n".join(infos)

    np.save("%s/total_fea.npy" % exp_dir, big_npy)
    n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
    infos.append("%s,%s" % (big_npy.shape, n_ivf))
    yield "\n".join(infos)
    index = faiss.index_factory(256 if version19 == "v1" else 768, "IVF%s,Flat" % n_ivf)

    infos.append("training")
    yield "\n".join(infos)
    index_ivf = faiss.extract_index_ivf(index)
    index_ivf.nprobe = 1
    index.train(big_npy)
    faiss.write_index(
        index,
        "%s/trained_IVF%s_Flat_nprobe_%s_%s_%s.index"
        % (exp_dir, n_ivf, index_ivf.nprobe, exp_dir1, version19),
    )
    infos.append("adding")
    yield "\n".join(infos)
    # Add vectors in batches to keep memory in check.
    batch_size_add = 8192
    for i in range(0, big_npy.shape[0], batch_size_add):
        index.add(big_npy[i : i + batch_size_add])
    index_save_path = "%s/added_IVF%s_Flat_nprobe_%s_%s_%s.index" % (
        exp_dir,
        n_ivf,
        index_ivf.nprobe,
        exp_dir1,
        version19,
    )
    faiss.write_index(index, index_save_path)
    infos.append(i18n("Successfully built index into") + " " + index_save_path)
    link_target = "%s/%s_IVF%s_Flat_nprobe_%s_%s_%s.index" % (
        outside_index_root,
        exp_dir1,
        n_ivf,
        index_ivf.nprobe,
        exp_dir1,
        version19,
    )
    try:
        # Symlinks typically require elevated privileges on Windows, so a hard
        # link is used there instead.
        link = os.link if platform.system() == "Windows" else os.symlink
        link(index_save_path, link_target)
        infos.append(i18n("Link index to outside folder") + " " + link_target)
    except:
        infos.append(
            i18n("Link index to outside folder")
            + " "
            + link_target
            + " "
            + i18n("Fail")
        )

    yield "\n".join(infos)
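
# A rough sketch of how an index produced above can be queried at inference
# time (the path is illustrative; v1 features are 256-dim, v2 are 768-dim):
#
#     import faiss
#     import numpy as np
#
#     index = faiss.read_index("logs/demo/added_IVF256_Flat_nprobe_1_demo_v2.index")
#     index.nprobe = 1  # number of IVF cells probed per query
#     query = np.random.rand(1, 768).astype(np.float32)
#     distances, neighbor_ids = index.search(query, 8)  # 8 nearest neighbors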


def train1key(
    exp_dir1,
    sr2,
    if_f0_3,
    trainset_dir4,
    spk_id5,
    np7,
    f0method8,
    save_epoch10,
    total_epoch11,
    batch_size12,
    if_save_latest13,
    pretrained_G14,
    pretrained_D15,
    gpus16,
    if_cache_gpu17,
    if_save_every_weights18,
    version19,
    gpus_rmvpe,
    author,
):
    infos = []

    def get_info_str(strr):
        infos.append(strr)
        return "\n".join(infos)

    # Step 1: preprocess the training set.
    yield get_info_str(i18n("Step 1: Processing data"))
    [get_info_str(_) for _ in preprocess_dataset(trainset_dir4, exp_dir1, sr2, np7)]

    # Step 2: extract pitch and features.
    yield get_info_str(i18n("Step 2: Pitch extraction & feature extraction"))
    [
        get_info_str(_)
        for _ in extract_f0_feature(
            gpus16, np7, f0method8, if_f0_3, exp_dir1, version19, gpus_rmvpe
        )
    ]

    # Step 3a: train the model.
    yield get_info_str(i18n("Step 3a: Model training started"))
    click_train(
        exp_dir1,
        sr2,
        if_f0_3,
        spk_id5,
        save_epoch10,
        total_epoch11,
        batch_size12,
        if_save_latest13,
        pretrained_G14,
        pretrained_D15,
        gpus16,
        if_cache_gpu17,
        if_save_every_weights18,
        version19,
        author,
    )
    yield get_info_str(
        i18n(
            "Training complete. You can check the training logs in the console or the 'train.log' file under the experiment folder."
        )
    )

    # Step 3b: build the retrieval index.
    [get_info_str(_) for _ in train_index(exp_dir1, version19)]
    yield get_info_str(i18n("All processes have been completed!"))


def change_info_(ckpt_path):
    # Read training metadata (sample rate, f0 flag, version) from the first
    # line of the train.log that sits next to the checkpoint.
    if not os.path.exists(ckpt_path.replace(os.path.basename(ckpt_path), "train.log")):
        return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}
    try:
        with open(
            ckpt_path.replace(os.path.basename(ckpt_path), "train.log"), "r"
        ) as f:
            # The first log line ends with a dict literal holding the training
            # config; eval() parses it (the file is locally produced).
            info = eval(f.read().strip("\n").split("\n")[0].split("\t")[-1])
            sr, f0 = info["sample_rate"], info["if_f0"]
            version = "v2" if ("version" in info and info["version"] == "v2") else "v1"
            return sr, str(f0), version
    except:
        traceback.print_exc()
        return {"__type__": "update"}, {"__type__": "update"}, {"__type__": "update"}


F0GPUVisible = not config.dml


def change_f0_method(f0method8):
    if f0method8 == "rmvpe_gpu":
        visible = F0GPUVisible
    else:
        visible = False
    return {"visible": visible, "__type__": "update"}