import gradio as gr
from TTS.api import TTS

# Load the multilingual XTTS v1 checkpoint and move it to the GPU.
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v1")
tts.to("cuda")
def predict(prompt, language, audio_file_pth):
    # Clone the voice from the reference clip and synthesize the prompt
    # in the selected language.
    tts.tts_to_file(
        text=prompt,
        file_path="output.wav",
        speaker_wav=audio_file_pth,
        language=language,
    )
    # Return a waveform video plus the audio file path for the two outputs.
    return gr.make_waveform(
        audio="output.wav",
    ), "output.wav"
title = "Coqui🐸 XTTS"
description = """
XTTS is a voice generation model that lets you clone voices into different languages using just a quick 3-second audio clip.
<br/>
Built on Tortoise, XTTS has important model changes that make cross-language voice cloning and multi-lingual speech generation super easy.
<br/>
This is the same model that powers Coqui Studio and the Coqui API; however, we apply a few tricks to make it faster and to support streaming inference.
<br/>
<br/>
<p>For faster inference without waiting in the queue, you should duplicate this space and upgrade to GPU via the settings.
<br/>
<a href="https://huggingface.co/spaces/coqui/xtts?duplicate=true">
<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
</p>
"""
article = """
<div style='margin:20px auto;'>
<p>By using this demo you agree to the terms of the Coqui Public Model License at https://coqui.ai/cpml</p>
</div>
"""
gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(
            label="Text Prompt",
            info="One or two sentences at a time is better",
            placeholder="It took me quite a long time to develop a voice, and now that I have it I'm not going to be silent.",
        ),
        gr.Dropdown(
            label="Language",
            info="Select an output language for the synthesised speech",
            choices=[
                "en",
                "es",
                "fr",
                "de",
                "it",
                "pt",
                "pl",
                "tr",
                "ru",
                "nl",
                "cs",  # ISO 639-1 code for Czech; XTTS does not accept "cz"
                "ar",
                "zh-cn",  # XTTS v1 expects "zh-cn" for Chinese
            ],
            max_choices=1,
            value="en",
        ),
        gr.Audio(
            label="Reference Audio",
            info="Click on the ✎ button to upload your own target speaker audio",
            type="filepath",
            value="examples/en_speaker_6.wav",
        ),
    ],
    outputs=[
        gr.Video(label="Synthesised Waveform"),
        gr.Audio(label="Synthesised Audio"),
    ],
    title=title,
    description=description,
    article=article,
).launch(debug=True)
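# Hedged variant (not in the original file): on a busy Space, enabling Gradio's
# request queue serializes GPU work across concurrent users, e.g.
# gr.Interface(...).queue().launch(debug=True)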