ProjetosFSI committed
Commit 7c7f25a
1 Parent(s): a8952ee

Create app.py

Files changed (1)
  1. app.py +188 -0
app.py ADDED
@@ -0,0 +1,188 @@
+ import tempfile
+ from typing import Optional
+ from TTS.config import load_config
+ import gradio as gr
+ import numpy as np
+ from TTS.utils.manage import ModelManager
+ from TTS.utils.synthesizer import Synthesizer
+
+ # Extended class that automatically confirms the terms of use
+ class CustomModelManager(ModelManager):
+     def ask_tos(self, output_path):
+         print("This sentence has been generated by a speech synthesis system. tts_models/multilingual/multi-dataset/xtts_v1.1")
+         print(" > You must confirm the following:")
+         print(' | > "I have purchased a commercial license from Coqui: [email protected]"')
+         print(' | > "Otherwise, I agree to the terms of the non-commercial CPML: https://coqui.ai/cpml" - [y/n]')
+         answer = 'y'  # Automatically accept the terms
+         return answer.lower() == 'y'
+
+ # Replace the default manager with the custom one
+ manager = CustomModelManager()
+ MODELS = {}
+ SPEAKERS = {}
+ MAX_TXT_LEN = 100
+
+ MODEL_NAMES = manager.list_tts_models()
+
+ # filter out multi-speaker models and slow wavegrad vocoders
+ filters = ["vctk", "your_tts", "ek1"]
+ MODEL_NAMES = [model_name for model_name in MODEL_NAMES if not any(f in model_name for f in filters)]
+
+ # reorder models: English first, with a preferred model moved to the top
+ EN = [el for el in MODEL_NAMES if "/en/" in el]
+ OTHER = [el for el in MODEL_NAMES if "/en/" not in el]
+ EN[0], EN[5] = EN[5], EN[0]
+ MODEL_NAMES = EN + OTHER
+
+ print(MODEL_NAMES)
+
+
+ def tts(text: str, model_name: str):
+     if len(text) > MAX_TXT_LEN:
+         text = text[:MAX_TXT_LEN]
+         print(f"Input text was cut off since it went over the {MAX_TXT_LEN} character limit.")
+     print(text, model_name)
+     # download model
+     model_path, config_path, model_item = manager.download_model(model_name)
+     print(f"Model path: {model_path}")
+     print(f"Config path: {config_path}")
+     print(f"Model item: {model_item}")
+
+     if config_path is None and 'config.json' in model_item['hf_url']:
+         config_url = model_item['hf_url'][3]  # Assuming the 4th URL is always the config.json
+         config_path = manager.download_from_url(config_url, model_name)
+         print(f"Downloaded config path: {config_path}")
+
+     if model_path is None or config_path is None:
+         raise ValueError("Model path or config path is None")
+
+     vocoder_name: Optional[str] = model_item["default_vocoder"]
+     # download vocoder
+     vocoder_path = None
+     vocoder_config_path = None
+     if vocoder_name is not None:
+         vocoder_path, vocoder_config_path, _ = manager.download_model(vocoder_name)
+
+     # init synthesizer
+     synthesizer = Synthesizer(
+         model_path, config_path, None, None, vocoder_path, vocoder_config_path,
+     )
+     # synthesize
+     if synthesizer is None:
+         raise NameError("model not found")
+     wavs = synthesizer.tts(text, None)
+     # return output
+     with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
+         synthesizer.save_wav(wavs, fp)
+         return fp.name
+
+
+ title = """<h1 align="center">🐸💬 CoquiTTS Playground </h1>"""
+
+ with gr.Blocks(analytics_enabled=False) as demo:
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown(
+                 """
+                 ## <img src="https://raw.githubusercontent.com/coqui-ai/TTS/main/images/coqui-log-green-TTS.png" height="56"/>
+                 """
+             )
+             gr.Markdown(
+                 """
+                 <br />
+
+                 ## 🐸Coqui.ai News
+                 - 📣 ⓍTTS, our production TTS model that can speak 13 languages, is released. [Blog Post](https://coqui.ai/blog/tts/open_xtts), [Demo](https://huggingface.co/spaces/coqui/xtts), [Docs](https://tts.readthedocs.io/en/dev/models/xtts.html)
+                 - 📣 [🐶Bark](https://github.com/suno-ai/bark) is now available for inference with unconstrained voice cloning. [Docs](https://tts.readthedocs.io/en/dev/models/bark.html)
+                 - 📣 You can use [~1100 Fairseq models](https://github.com/facebookresearch/fairseq/tree/main/examples/mms) with 🐸TTS.
+                 - 📣 🐸TTS now supports 🐢Tortoise with faster inference. [Docs](https://tts.readthedocs.io/en/dev/models/tortoise.html)
+                 - 📣 The **Coqui Studio API** has landed in 🐸TTS. - [Example](https://github.com/coqui-ai/TTS/blob/dev/README.md#-python-api)
+                 - 📣 [**Coqui Studio API**](https://docs.coqui.ai/docs) is live.
+                 - 📣 Voice generation with prompts - **Prompt to Voice** - is live on [**Coqui Studio**](https://app.coqui.ai/auth/signin)! - [Blog Post](https://coqui.ai/blog/tts/prompt-to-voice)
+                 - 📣 Voice generation with fusion - **Voice fusion** - is live on [**Coqui Studio**](https://app.coqui.ai/auth/signin).
+                 - 📣 Voice cloning is live on [**Coqui Studio**](https://app.coqui.ai/auth/signin).
+                 <br>
+
+                 """
+             )
+         with gr.Column():
+             gr.Markdown(
+                 """
+                 <br/>
+
+                 💻 This space showcases some of the **[CoquiTTS](https://github.com/coqui-ai/TTS)** models.
+
+                 <br/>
+
+                 There are > 30 languages with single- and multi-speaker models, all thanks to our 👑 Contributors.
+
+                 <br/>
+
+                 Visit the links below for more.
+
+                 | | |
+                 | ------------------------------- | --------------------------------------- |
+                 | 🐸💬 **CoquiTTS** | [GitHub](https://github.com/coqui-ai/TTS) |
+                 | 💼 **Documentation** | [ReadTheDocs](https://tts.readthedocs.io/en/latest/) |
+                 | 👩‍💻 **Questions** | [GitHub Discussions] |
+                 | 🗯 **Community** | [![Discord](https://img.shields.io/discord/1037326658807533628?color=%239B59B6&label=chat%20on%20discord)](https://discord.gg/5eXr5seRrv) |
+
+                 [github issue tracker]: https://github.com/coqui-ai/tts/issues
+                 [github discussions]: https://github.com/coqui-ai/TTS/discussions
+                 [discord]: https://discord.gg/5eXr5seRrv
+
+
+                 """
+             )
+
+     with gr.Row():
+         gr.Markdown(
+             """
+             <details>
+             <summary>👑 Model contributors</summary>
+
+             - <a href="https://github.com/nmstoker/" target="_blank">@nmstoker</a>
+             - <a href="https://github.com/kaiidams/" target="_blank">@kaiidams</a>
+             - <a href="https://github.com/WeberJulian/" target="_blank">@WeberJulian</a>
+             - <a href="https://github.com/Edresson/" target="_blank">@Edresson</a>
+             - <a href="https://github.com/thorstenMueller/" target="_blank">@thorstenMueller</a>
+             - <a href="https://github.com/r-dh/" target="_blank">@r-dh</a>
+             - <a href="https://github.com/kirianguiller/" target="_blank">@kirianguiller</a>
+             - <a href="https://github.com/robinhad/" target="_blank">@robinhad</a>
+             - <a href="https://github.com/fkarabiber/" target="_blank">@fkarabiber</a>
+             - <a href="https://github.com/nicolalandro/" target="_blank">@nicolalandro</a>
+             - <a href="https://github.com/a-froghyar" target="_blank">@a-froghyar</a>
+             - <a href="https://github.com/manmay-nakhashi" target="_blank">@manmay-nakhashi</a>
+             - <a href="https://github.com/noml4u" target="_blank">@noml4u</a>
+             </details>
+
+             <br/>
+             """
+         )
+
+     with gr.Row():
+         with gr.Column():
+             input_text = gr.inputs.Textbox(
+                 label="Input Text",
+                 default="This sentence has been generated by a speech synthesis system.",
+             )
+             model_select = gr.inputs.Dropdown(
+                 label="Pick Model: tts_models/<language>/<dataset>/<model_name>",
+                 choices=MODEL_NAMES,
+                 default="tts_models/en/jenny/jenny"
+             )
+             tts_button = gr.Button("Send", elem_id="send-btn", visible=True)
+
+         with gr.Column():
+             output_audio = gr.outputs.Audio(label="Output", type="filepath")
+
+     tts_button.click(
+         tts,
+         inputs=[
+             input_text,
+             model_select,
+         ],
+         outputs=[output_audio],
+     )
+
+ demo.queue(concurrency_count=16).launch(debug=True)
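
For reference, the download-and-synthesize flow inside `tts()` can be exercised without the Gradio UI. The sketch below is not part of the commit; it assumes the `TTS` package is installed and that the example model name `tts_models/en/ljspeech/tacotron2-DDC` is available in the installed release (any entry of `MODEL_NAMES` would work the same way).

```python
# Standalone sketch of the flow that tts() implements above (not part of app.py).
# Assumes the TTS package is installed; the model name is an example and may
# vary between TTS releases.
import tempfile

from TTS.utils.manage import ModelManager
from TTS.utils.synthesizer import Synthesizer

manager = ModelManager()
model_path, config_path, model_item = manager.download_model("tts_models/en/ljspeech/tacotron2-DDC")

# Download the model's default vocoder, if it declares one.
vocoder_path = vocoder_config_path = None
if model_item["default_vocoder"] is not None:
    vocoder_path, vocoder_config_path, _ = manager.download_model(model_item["default_vocoder"])

synthesizer = Synthesizer(model_path, config_path, None, None, vocoder_path, vocoder_config_path)
wav = synthesizer.tts("This sentence has been generated by a speech synthesis system.", None)

# Write the waveform to a temporary .wav file, as tts() does.
with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
    synthesizer.save_wav(wav, fp)
    print("Wrote", fp.name)
```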