import gradio as gr
import torch
import numpy as np
import phonemizer

import styletts2importable
import ljspeechimportable
from tortoise.utils.text import split_and_recombine_text

theme = gr.themes.Base(
    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
)

voicelist = ['f-us-1', 'f-us-2', 'f-us-3', 'f-us-4', 'm-us-1', 'm-us-2', 'm-us-3', 'm-us-4']
voices = {}
global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True)
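
# Precompute the style vector for each bundled reference voice once at startup,
# so synthesis requests can reuse it instead of re-encoding the reference WAV.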
for v in voicelist:
    voices[v] = styletts2importable.compute_style(f'voices/{v}.wav')
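
# Multi-voice synthesis: validate the input, split long text into
# sentence-sized chunks, run inference per chunk with the selected
# precomputed style, and concatenate the waveforms into one 24 kHz clip.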
def synthesize(text, voice, lngsteps, progress=gr.Progress()):
    if text.strip() == "":
        raise gr.Error("You must enter some text")
    if len(text) > 7500:
        raise gr.Error("Text must be <7.5k characters")
    texts = split_and_recombine_text(text)
    v = voice.lower()
    audios = []
    for t in progress.tqdm(texts):
        audios.append(styletts2importable.inference(t, voices[v], alpha=0.3, beta=0.7, diffusion_steps=lngsteps, embedding_scale=1))
    return (24000, np.concatenate(audios))
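
# Voice cloning: same pipeline, but the style vector is computed on every
# call from the user-uploaded reference audio rather than a bundled voice.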
def clsynthesize(text, voice, vcsteps, progress=gr.Progress()):
    if text.strip() == "":
        raise gr.Error("You must enter some text")
    if len(text) > 7500:
        raise gr.Error("Text must be <7.5k characters")
    texts = split_and_recombine_text(text)
    audios = []
    for t in progress.tqdm(texts):
        audios.append(styletts2importable.inference(t, styletts2importable.compute_style(voice), alpha=0.3, beta=0.7, diffusion_steps=vcsteps, embedding_scale=1))
    return (24000, np.concatenate(audios))
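
# Single-speaker LJSpeech model: no reference style here; sampling is
# conditioned on a fresh Gaussian noise latent instead.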
def ljsynthesize(text, steps, progress=gr.Progress()):
    noise = torch.randn(1, 1, 256).to('cuda' if torch.cuda.is_available() else 'cpu')
    if text.strip() == "":
        raise gr.Error("You must enter some text")
    if len(text) > 7500:
        raise gr.Error("Text must be <7.5k characters")
    texts = split_and_recombine_text(text)
    audios = []
    for t in progress.tqdm(texts):
        audios.append(ljspeechimportable.inference(t, noise, diffusion_steps=steps, embedding_scale=1))
    return (24000, np.concatenate(audios))
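
# Multi-Voice tab: pick one of the bundled voices and synthesize with it.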
with gr.Blocks() as vctk:
    with gr.Row():
        with gr.Column(scale=1):
            inp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
            voice = gr.Dropdown(voicelist, label="Voice", info="Select a default voice.", value='m-us-2', interactive=True)
            multispeakersteps = gr.Slider(minimum=3, maximum=15, value=7, step=1, label="Diffusion Steps", info="In theory, more steps give higher quality but slower synthesis; in practice we cannot hear a difference. Try fewer steps first - it is faster.", interactive=True)
        with gr.Column(scale=1):
            btn = gr.Button("Synthesize", variant="primary")
            audio = gr.Audio(interactive=False, label="Synthesized Audio")
            btn.click(synthesize, inputs=[inp, voice, multispeakersteps], outputs=[audio], concurrency_limit=4)
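
# Voice Cloning tab: reference uploads are capped at 300 seconds via max_length.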
with gr.Blocks() as clone:
    with gr.Row():
        with gr.Column(scale=1):
            clinp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
            clvoice = gr.Audio(label="Voice", interactive=True, type='filepath', max_length=300)
            vcsteps = gr.Slider(minimum=3, maximum=20, value=20, step=1, label="Diffusion Steps", info="In theory, more steps give higher quality but slower synthesis; in practice we cannot hear a difference. Try fewer steps first - it is faster.", interactive=True)
        with gr.Column(scale=1):
            clbtn = gr.Button("Synthesize", variant="primary")
            claudio = gr.Audio(interactive=False, label="Synthesized Audio")
            clbtn.click(clsynthesize, inputs=[clinp, clvoice, vcsteps], outputs=[claudio], concurrency_limit=4)
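
# LJSpeech tab: the single-speaker model, so no voice selection is needed.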
with gr.Blocks() as lj:
    with gr.Row():
        with gr.Column(scale=1):
            ljinp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
            ljsteps = gr.Slider(minimum=3, maximum=20, value=3, step=1, label="Diffusion Steps", info="In theory, more steps give higher quality but slower synthesis; in practice we cannot hear a difference. Try fewer steps first - it is faster.", interactive=True)
        with gr.Column(scale=1):
            ljbtn = gr.Button("Synthesize", variant="primary")
            ljaudio = gr.Audio(interactive=False, label="Synthesized Audio")
            ljbtn.click(ljsynthesize, inputs=[ljinp, ljsteps], outputs=[ljaudio], concurrency_limit=4)
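
# Top-level app: analytics snippets plus the three tabs defined above.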
with gr.Blocks(title="StyleTTS 2", css="footer{display:none !important}", theme=theme) as demo:
    gr.HTML("""<script async src="https://www.googletagmanager.com/gtag/js?id=G-RTMC15V61G"></script>
<script>
  window.dataLayer = window.dataLayer || [];
  function gtag(){dataLayer.push(arguments);}
  gtag('js', new Date());
  gtag('config', 'G-RTMC15V61G');
</script>
<script type="text/javascript">
  (function(c,l,a,r,i,t,y){
    c[a]=c[a]||function(){(c[a].q=c[a].q||[]).push(arguments)};
    t=l.createElement(r);t.async=1;t.src="https://www.clarity.ms/tag/"+i;
    y=l.getElementsByTagName(r)[0];y.parentNode.insertBefore(t,y);
  })(window, document, "clarity", "script", "jydi4lprw6");
</script>""")
    gr.TabbedInterface([vctk, clone, lj], ['Multi-Voice', 'Voice Cloning', 'LJSpeech'])

if __name__ == "__main__":
    demo.queue(api_open=True, max_size=15).launch(show_api=True)