import sys
import os
# By using XTTS you agree to CPML license https://coqui.ai/cpml
os.environ["COQUI_TOS_AGREED"] = "1"
import gradio as gr
from TTS.api import TTS
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v1", gpu=False)
#tts.to("cuda") # cuda only
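# A minimal sketch (an assumption, not part of the original Space): pick the device at
# startup instead of hard-coding gpu=False above. torch is available wherever TTS runs.
# Left commented out so behaviour is unchanged.
# import torch
# tts.to("cuda" if torch.cuda.is_available() else "cpu")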
def predict(prompt, language, audio_file_pth, mic_file_path, use_mic, agree):
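    """Clone the reference voice and synthesise `prompt` in `language`.

    Returns a (waveform video, output wav path) pair on success, or (None, None)
    after emitting a gr.Warning when validation fails (terms not accepted, missing
    microphone recording, or a prompt shorter than 2 / longer than 200 characters).
    """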
    if agree:
        if use_mic:
            if mic_file_path is not None:
                speaker_wav = mic_file_path
            else:
                gr.Warning("Please record your voice with the microphone, or uncheck Use Microphone to use a reference audio")
                return (
                    None,
                    None,
                )
        else:
            speaker_wav = audio_file_pth

        if len(prompt) < 2:
            gr.Warning("Please give a longer prompt text")
            return (
                None,
                None,
            )
        if len(prompt) > 200:
            gr.Warning("Text length is limited to 200 characters for this demo, please try a shorter text")
            return (
                None,
                None,
            )
        try:
            tts.tts_to_file(
                text=prompt,
                file_path="output.wav",
                speaker_wav=speaker_wav,
                language=language,
            )
        except RuntimeError as e:
            if "device-assert" in str(e):
                # Nothing can be done about a CUDA device-side assert; the process needs a restart.
                gr.Warning("Unhandled exception encountered, please retry in a minute")
                print("CUDA device-assert runtime error encountered, restart needed")
                sys.exit("Exit due to CUDA device-assert")
            else:
                raise e
        return (
            gr.make_waveform(
                audio="output.wav",
            ),
            "output.wav",
        )
    else:
        gr.Warning("Please accept the Terms & Conditions!")
        return (
            None,
            None,
        )
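# Optional local smoke test (an assumption, not part of the original Space): call
# predict() directly without launching the UI. Uncomment to run it once; it writes
# output.wav next to this script before the interface starts.
# if __name__ == "__main__":
#     video, wav_path = predict(
#         "Hello, this is a quick voice cloning test.",
#         "en",
#         "examples/female.wav",
#         None,
#         False,
#         True,
#     )
#     print("Synthesised audio written to", wav_path)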
title = "Coqui🐸 XTTS"
description = """
<a href="https://huggingface.co/coqui/XTTS-v1">XTTS</a> is a Voice generation model that lets you clone voices into different languages by using just a quick 3-second audio clip.
<br/>
XTTS is built on previous research, like Tortoise, with additional architectural innovations and training to make cross-language voice cloning and multilingual speech generation possible.
<br/>
This is the same model that powers our creator application <a href="https://coqui.ai">Coqui Studio</a> as well as the <a href="https://docs.coqui.ai">Coqui API</a>. In production we apply modifications to make low-latency streaming possible.
<br/>
Leave a star on GitHub at <a href="https://github.com/coqui-ai/TTS">🐸TTS</a>, where our open-source inference and training code lives.
<br/>
<p>For faster inference without waiting in the queue, duplicate this Space and upgrade to a GPU via the settings.
<br/>
<a href="https://huggingface.co/spaces/coqui/xtts?duplicate=true">
<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
</p>
"""
article = """
<div style='margin:20px auto;'>
<p>By using this demo you agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml</p>
</div>
"""
examples = [
    ["Once when I was six years old I saw a magnificent picture", "en", "examples/female.wav", None, False, True],
    ["Lorsque j'avais six ans j'ai vu, une fois, une magnifique image", "fr", "examples/male.wav", None, False, True],
    ["Als ich sechs war, sah ich einmal ein wunderbares Bild", "de", "examples/female.wav", None, False, True],
    ["Cuando tenía seis años, vi una vez una imagen magnífica", "es", "examples/male.wav", None, False, True],
    ["Quando eu tinha seis anos eu vi, uma vez, uma imagem magnífica", "pt", "examples/female.wav", None, False, True],
    ["Kiedy miałem sześć lat, zobaczyłem pewnego razu wspaniały obrazek", "pl", "examples/male.wav", None, False, True],
    ["Un tempo lontano, quando avevo sei anni, vidi un magnifico disegno", "it", "examples/female.wav", None, False, True],
    ["Bir zamanlar, altı yaşındayken, muhteşem bir resim gördüm", "tr", "examples/female.wav", None, False, True],
    ["Когда мне было шесть лет, я увидел однажды удивительную картинку", "ru", "examples/female.wav", None, False, True],
    ["Toen ik een jaar of zes was, zag ik op een keer een prachtige plaat", "nl", "examples/male.wav", None, False, True],
    ["Když mi bylo šest let, viděl jsem jednou nádherný obrázek", "cs", "examples/female.wav", None, False, True],
    ["当我还只有六岁的时候, 看到了一副精彩的插画", "zh-cn", "examples/female.wav", None, False, True],
]
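# Each example row maps positionally onto the `inputs` list of the gr.Interface
# below: text prompt, language code, reference audio path, microphone recording
# path, the use-microphone flag, and the license-agreement flag.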
gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(
            label="Text Prompt",
            info="One or two sentences at a time is better",
            value="Hi there, I'm your new voice clone. Try your best to upload quality audio",
        ),
        gr.Dropdown(
            label="Language",
            info="Select an output language for the synthesised speech",
            choices=["en", "es", "fr", "de", "it", "pt", "pl", "tr", "ru", "nl", "cs", "ar", "zh-cn"],
            max_choices=1,
            value="en",
        ),
        gr.Audio(
            label="Reference Audio",
            info="Click on the ✎ button to upload your own target speaker audio",
            type="filepath",
            value="examples/female.wav",
        ),
        gr.Audio(
            source="microphone",
            type="filepath",
            info="Use your microphone to record audio",
            label="Use Microphone for Reference",
        ),
        gr.Checkbox(
            label="Check to use Microphone as Reference",
            value=False,
            info="Notice: Microphone input may not work properly under traffic",
        ),
        gr.Checkbox(
            label="Agree",
            value=False,
            info="I agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml",
        ),
    ],
    outputs=[
        gr.Video(label="Waveform Visual"),
        gr.Audio(label="Synthesised Audio"),
    ],
    title=title,
    description=description,
    article=article,
    examples=examples,
).queue().launch(debug=True)