Update coqui.py
coqui.py CHANGED
@@ -9,9 +9,8 @@ import torch
 import torchaudio
 import numpy as np
 
-
-#
-#download for mecab
+# update gradio to faster streaming
+# download for mecab
 os.system('python -m unidic download')
 
 # By using XTTS you agree to CPML license https://coqui.ai/cpml
@@ -33,10 +32,12 @@ from TTS.api import TTS
 from TTS.tts.configs.xtts_config import XttsConfig
 from TTS.tts.models.xtts import Xtts
 from TTS.utils.generic_utils import get_user_data_dir
+from huggingface_hub import HfApi
 
 HF_TOKEN = os.environ.get("HF_TOKEN")
+if not HF_TOKEN:
+    raise ValueError("HF_TOKEN environment variable is not set")
 
-from huggingface_hub import HfApi
 # will use api to restart space on a unrecoverable error
 api = HfApi(token=HF_TOKEN)
 repo_id = "coqui/xtts"
@@ -54,15 +55,24 @@ config = XttsConfig()
 config.load_json(os.path.join(model_path, "config.json"))
 
 model = Xtts.init_from_config(config)
+checkpoint_path = os.path.join(model_path, "model.pth")
+vocab_path = os.path.join(model_path, "vocab.json")
+
+if not os.path.exists(checkpoint_path):
+    raise FileNotFoundError(f"Checkpoint file not found at {checkpoint_path}")
+if not os.path.exists(vocab_path):
+    raise FileNotFoundError(f"Vocab file not found at {vocab_path}")
+
 model.load_checkpoint(
     config,
-    checkpoint_path=
-    vocab_path=
+    checkpoint_path=checkpoint_path,
+    vocab_path=vocab_path,
     eval=True,
     use_deepspeed=True,
 )
 model.cuda()
 
+
 # This is for debugging purposes only
 DEVICE_ASSERT_DETECTED = 0
 DEVICE_ASSERT_PROMPT = None
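The first hunk keeps os.system('python -m unidic download'), which fetches the unidic dictionary referenced by the "# download for mecab" comment. os.system discards the command's exit status, so a failed download goes unnoticed until tokenization breaks later. A variant that surfaces the failure (not part of this commit, just a sketch) could be:

import subprocess
import sys

# Not part of this commit: raise immediately if the unidic (MeCab dictionary)
# download fails, instead of silently ignoring the exit code as os.system does.
subprocess.run([sys.executable, "-m", "unidic", "download"], check=True)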
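The comment "# will use api to restart space on a unrecoverable error" explains why HfApi is instantiated at import time in the second hunk. The error handling itself lies outside the hunks shown, so the sketch below is only an illustration of that intent: HfApi.restart_space() is a real huggingface_hub method, while the wrapper function and the device-side-assert check are assumptions, not code from this file.

import os
from huggingface_hub import HfApi

HF_TOKEN = os.environ.get("HF_TOKEN")
api = HfApi(token=HF_TOKEN)
repo_id = "coqui/xtts"

def run_with_space_restart(infer, *args, **kwargs):
    # Hypothetical wrapper: run one inference call and, if the CUDA context is
    # left in an unrecoverable state, ask the Hub to restart this Space.
    try:
        return infer(*args, **kwargs)
    except RuntimeError as err:
        if "device-side assert" in str(err):
            api.restart_space(repo_id=repo_id)
        raise

The DEVICE_ASSERT_DETECTED and DEVICE_ASSERT_PROMPT globals at the end of the third hunk suggest the file tracks this condition across calls, but how it does so is not visible in this diff.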