fx dir
Browse files
- app.py +187 -0
- tts_harvard.py +45 -82
- visualize_tts_plesantness.py +6 -6
app.py
ADDED
@@ -0,0 +1,187 @@
+INTROTXT = """# StyleTTS 2
+
+[Paper](https://arxiv.org/abs/2306.07691) - [Samples](https://styletts2.github.io/) - [Code](https://github.com/yl4579/StyleTTS2) - [Discord](https://discord.gg/ha8sxdG2K4)
+
+A free demo of StyleTTS 2. **I am not affiliated with the StyleTTS 2 authors.**
+
+**Before using this demo, you agree to inform listeners that the speech samples are synthesized by the pre-trained models, unless you have permission to use the voice you synthesize. That is, you agree to use only voices whose speakers have granted permission for their voice to be cloned, either directly or by license, before making synthesized voices public; otherwise, you must publicly announce that the voices are synthesized.**
+
+Is there a long queue on this space? Duplicate it and add a more powerful GPU to skip the wait! **Note: Thank you to Hugging Face for their generous GPU grant program!**
+
+**NOTE: StyleTTS 2 does better on longer texts.** For example, making it say "hi" will produce a lower-quality result than making it say a longer phrase.
+
+**NOTE: StyleTTS 2 is _currently_ English-only. Join the Discord for updates on multilingual training.**
+"""
+import gradio as gr
+import styletts2importable
+import ljspeechimportable
+import torch
+import os
+from txtsplit import txtsplit
+import numpy as np
+import pickle
+theme = gr.themes.Base(
+    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
+)
+voicelist = ['f-us-1', 'f-us-2', 'f-us-3', 'f-us-4', 'm-us-1', 'm-us-2', 'm-us-3', 'm-us-4']
+voices = {}
+import phonemizer
+global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True)
+# todo: cache computed style, load using pickle
+# if os.path.exists('voices.pkl'):
+#     with open('voices.pkl', 'rb') as f:
+#         voices = pickle.load(f)
+# else:
+for v in voicelist:
+    voices[v] = styletts2importable.compute_style(f'voices/{v}.wav')
+# def synthesize(text, voice, multispeakersteps):
+#     if text.strip() == "":
+#         raise gr.Error("You must enter some text")
+#     # if len(global_phonemizer.phonemize([text])) > 300:
+#     if len(text) > 300:
+#         raise gr.Error("Text must be under 300 characters")
+#     v = voice.lower()
+#     # return (24000, styletts2importable.inference(text, voices[v], alpha=0.3, beta=0.7, diffusion_steps=7, embedding_scale=1))
+#     return (24000, styletts2importable.inference(text, voices[v], alpha=0.3, beta=0.7, diffusion_steps=multispeakersteps, embedding_scale=1))
+if not torch.cuda.is_available(): INTROTXT += "\n\n### You are on a CPU-only system, inference will be much slower.\n\nYou can use the [online demo](https://huggingface.co/spaces/styletts2/styletts2) for fast inference."
+def synthesize(text, voice, lngsteps, progress=gr.Progress()):
+    if text.strip() == "":
+        raise gr.Error("You must enter some text")
+    if len(text) > 50000:
+        raise gr.Error("Text must be <50k characters")
+    print("*** saying ***")
+    print(text)
+    print("*** end ***")
+    texts = txtsplit(text)
+    v = voice.lower()
+    audios = []
+    for t in progress.tqdm(texts):
+        print(t)
+        audios.append(styletts2importable.inference(t, voices[v], alpha=0.3, beta=0.7, diffusion_steps=lngsteps, embedding_scale=1))
+    return (24000, np.concatenate(audios))
+# def longsynthesize(text, voice, lngsteps, password, progress=gr.Progress()):
+#     if password == os.environ['ACCESS_CODE']:
+#         if text.strip() == "":
+#             raise gr.Error("You must enter some text")
+#         if lngsteps > 25:
+#             raise gr.Error("Max 25 steps")
+#         if lngsteps < 5:
+#             raise gr.Error("Min 5 steps")
+#         texts = split_and_recombine_text(text)
+#         v = voice.lower()
+#         audios = []
+#         for t in progress.tqdm(texts):
+#             audios.append(styletts2importable.inference(t, voices[v], alpha=0.3, beta=0.7, diffusion_steps=lngsteps, embedding_scale=1))
+#         return (24000, np.concatenate(audios))
+#     else:
+#         raise gr.Error('Wrong access code')
+def clsynthesize(text, voice, vcsteps, embscale, alpha, beta, progress=gr.Progress()):
+    # if text.strip() == "":
+    #     raise gr.Error("You must enter some text")
+    # # if global_phonemizer.phonemize([text]) > 300:
+    # if len(text) > 400:
+    #     raise gr.Error("Text must be under 400 characters")
+    # # return (24000, styletts2importable.inference(text, styletts2importable.compute_style(voice), alpha=0.3, beta=0.7, diffusion_steps=20, embedding_scale=1))
+    # return (24000, styletts2importable.inference(text, styletts2importable.compute_style(voice), alpha=0.3, beta=0.7, diffusion_steps=vcsteps, embedding_scale=1))
+    if text.strip() == "":
+        raise gr.Error("You must enter some text")
+    if len(text) > 50000:
+        raise gr.Error("Text must be <50k characters")
+    if embscale > 1.3 and len(text) < 20:
+        gr.Warning("WARNING: You entered short text, you may get static!")
+    print("*** saying ***")
+    print(text)
+    print("*** end ***")
+    texts = txtsplit(text)
+    audios = []
+    # vs = styletts2importable.compute_style(voice)
+    vs = styletts2importable.compute_style(voice)
+    # print(vs)
+    for t in progress.tqdm(texts):
+        audios.append(styletts2importable.inference(t, vs, alpha=alpha, beta=beta, diffusion_steps=vcsteps, embedding_scale=embscale))
+        # audios.append(styletts2importable.inference(t, vs, diffusion_steps=10, alpha=0.3, beta=0.7, embedding_scale=5))
+    return (24000, np.concatenate(audios))
+def ljsynthesize(text, steps, progress=gr.Progress()):
+    # if text.strip() == "":
+    #     raise gr.Error("You must enter some text")
+    # # if global_phonemizer.phonemize([text]) > 300:
+    # if len(text) > 400:
+    #     raise gr.Error("Text must be under 400 characters")
+    noise = torch.randn(1, 1, 256).to('cuda' if torch.cuda.is_available() else 'cpu')
+    # return (24000, ljspeechimportable.inference(text, noise, diffusion_steps=7, embedding_scale=1))
+    if text.strip() == "":
+        raise gr.Error("You must enter some text")
+    if len(text) > 150000:
+        raise gr.Error("Text must be <150k characters")
+    print("*** saying ***")
+    print(text)
+    print("*** end ***")
+    texts = txtsplit(text)
+    audios = []
+    for t in progress.tqdm(texts):
+        audios.append(ljspeechimportable.inference(t, noise, diffusion_steps=steps, embedding_scale=1))
+    return (24000, np.concatenate(audios))
+
+
+with gr.Blocks() as vctk:
+    with gr.Row():
+        with gr.Column(scale=1):
+            inp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
+            voice = gr.Dropdown(voicelist, label="Voice", info="Select a default voice.", value='m-us-2', interactive=True)
+            multispeakersteps = gr.Slider(minimum=3, maximum=15, value=3, step=1, label="Diffusion Steps", info="Theoretically, higher should give better quality but run slower; in practice we could not notice a difference. Try lower steps first - it is faster.", interactive=True)
+            # use_gruut = gr.Checkbox(label="Use alternate phonemizer (Gruut) - Experimental")
+        with gr.Column(scale=1):
+            btn = gr.Button("Synthesize", variant="primary")
+            audio = gr.Audio(interactive=False, label="Synthesized Audio", waveform_options={'waveform_progress_color': '#3C82F6'})
+    btn.click(synthesize, inputs=[inp, voice, multispeakersteps], outputs=[audio], concurrency_limit=4)
+with gr.Blocks() as clone:
+    with gr.Row():
+        with gr.Column(scale=1):
+            clinp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
+            clvoice = gr.Audio(label="Voice", interactive=True, type='filepath', max_length=300, waveform_options={'waveform_progress_color': '#3C82F6'})
+            vcsteps = gr.Slider(minimum=3, maximum=20, value=20, step=1, label="Diffusion Steps", info="Theoretically, higher should give better quality but run slower; in practice we could not notice a difference. Try lower steps first - it is faster.", interactive=True)
+            embscale = gr.Slider(minimum=1, maximum=10, value=1, step=0.1, label="Embedding Scale (READ WARNING BELOW)", info="Defaults to 1. WARNING: If you set this too high and generate text that's too short you will get static!", interactive=True)
+            alpha = gr.Slider(minimum=0, maximum=1, value=0.3, step=0.1, label="Alpha", info="Defaults to 0.3", interactive=True)
+            beta = gr.Slider(minimum=0, maximum=1, value=0.7, step=0.1, label="Beta", info="Defaults to 0.7", interactive=True)
+        with gr.Column(scale=1):
+            clbtn = gr.Button("Synthesize", variant="primary")
+            claudio = gr.Audio(interactive=False, label="Synthesized Audio", waveform_options={'waveform_progress_color': '#3C82F6'})
+    clbtn.click(clsynthesize, inputs=[clinp, clvoice, vcsteps, embscale, alpha, beta], outputs=[claudio], concurrency_limit=4)
+# with gr.Blocks() as longText:
+#     with gr.Row():
+#         with gr.Column(scale=1):
+#             lnginp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
+#             lngvoice = gr.Dropdown(voicelist, label="Voice", info="Select a default voice.", value='m-us-1', interactive=True)
+#             lngsteps = gr.Slider(minimum=5, maximum=25, value=10, step=1, label="Diffusion Steps", info="Higher = better quality, but slower", interactive=True)
+#             lngpwd = gr.Textbox(label="Access code", info="This feature is in beta. You need an access code to use it as it uses more resources and we would like to prevent abuse")
+#         with gr.Column(scale=1):
+#             lngbtn = gr.Button("Synthesize", variant="primary")
+#             lngaudio = gr.Audio(interactive=False, label="Synthesized Audio")
+#     lngbtn.click(longsynthesize, inputs=[lnginp, lngvoice, lngsteps, lngpwd], outputs=[lngaudio], concurrency_limit=4)
+with gr.Blocks() as lj:
+    with gr.Row():
+        with gr.Column(scale=1):
+            ljinp = gr.Textbox(label="Text", info="What would you like StyleTTS 2 to read? It works better on full sentences.", interactive=True)
+            ljsteps = gr.Slider(minimum=3, maximum=20, value=3, step=1, label="Diffusion Steps", info="Theoretically, higher should give better quality but run slower; in practice we could not notice a difference. Try lower steps first - it is faster.", interactive=True)
+        with gr.Column(scale=1):
+            ljbtn = gr.Button("Synthesize", variant="primary")
+            ljaudio = gr.Audio(interactive=False, label="Synthesized Audio", waveform_options={'waveform_progress_color': '#3C82F6'})
+    ljbtn.click(ljsynthesize, inputs=[ljinp, ljsteps], outputs=[ljaudio], concurrency_limit=4)
+with gr.Blocks(title="StyleTTS 2", css="footer{display:none !important}", theme=theme) as demo:
+    gr.Markdown(INTROTXT)
+    gr.DuplicateButton("Duplicate Space")
+    # gr.TabbedInterface([vctk, clone, lj, longText], ['Multi-Voice', 'Voice Cloning', 'LJSpeech', 'Long Text [Beta]'])
+    gr.TabbedInterface([vctk, clone, lj], ['Multi-Voice', 'Voice Cloning', 'LJSpeech'])
+    gr.Markdown("""
+Demo by [mrfakename](https://twitter.com/realmrfakename). I am not affiliated with the StyleTTS 2 authors.
+
+Run this demo locally using Docker:
+
+```bash
+docker run -it -p 7860:7860 --platform=linux/amd64 --gpus all registry.hf.space/styletts2-styletts2:latest python app.py
+```
+""") # Please do not remove this line.
+if __name__ == "__main__":
+    # demo.queue(api_open=False, max_size=15).launch(show_api=False)
+    demo.queue(api_open=False, max_size=15).launch(show_api=False)
+
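The chunk-and-concatenate pattern in `synthesize` is reusable outside Gradio. A minimal headless sketch using the same calls the Space makes (the `say` helper, the reference wav, and the output path are illustrative, not part of the repo):

```python
import numpy as np
import soundfile as sf
import styletts2importable
from txtsplit import txtsplit

def say(text, ref_wav='voices/m-us-2.wav', steps=5, out='out.wav'):
    style = styletts2importable.compute_style(ref_wav)  # style vector from a reference wav
    chunks = txtsplit(text)                             # split long text into model-sized pieces
    audio = np.concatenate([
        styletts2importable.inference(t, style, alpha=0.3, beta=0.7,
                                      diffusion_steps=steps, embedding_scale=1)
        for t in chunks
    ])
    sf.write(out, audio, 24000)  # StyleTTS 2 outputs 24 kHz audio

say("StyleTTS 2 does better on longer, full sentences than on single words.")
```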
tts_harvard.py
CHANGED
@@ -1,48 +1,42 @@
-# Synthesize all Harvard Lists
+# Synthesize all Harvard Lists - 767 sentences as a single .wav
 #
-# 1.
-# Folder: 'prompt_mimic3/'
-# 2. using mimic3 4x accelerated style
-# Folder: 'prompt_mimic3speed/'
-# 3. using crema-d style
-# Folder: 'prompt_human/'
-#
+# 1.
 #
+# './prompt_mimic3_english/'
+#
+# 2.
+#
+# './prompt_mimic3_english_4x/'
+#
+# 3.
+#
+# './prompt_human/'
+#
+# 4.
+#
+# './prompt_mimic3_foreign/'
+#
+# 5.
+#
+# './prompt_mimic3_foreign_4x/'
+#
+#
+# ----> THE .wavs will be used for visualisation
 
 import soundfile
 import json
 import numpy as np
 import audb
 from pathlib import Path
-
+import os
 LABELS = ['arousal', 'dominance', 'valence']
 
 
-
-
-def load_speech(split=None):
+def load_human_speech(split=None):
     DB = [
         # [dataset, version, table, has_timdeltas_or_is_full_wavfile]
-
-
-        # ['entertain-playtestcloud', '1.1.0', 'emotion.categories.train.gold_standard', True],
-        # ['erik', '2.2.0', 'emotion.categories.train.gold_standard', True],
-        # ['meld', '1.3.1', 'emotion.categories.train.gold_standard', False],
-        # ['msppodcast', '5.0.0', 'emotion.categories.train.gold_standard', False], # tandalone bucket because it has gt labels?
-        # ['myai', '1.0.1', 'emotion.categories.train.gold_standard', False],
-        # ['casia', None, 'emotion.categories.gold_standard', False],
-        # ['switchboard-1', None, 'sentiment', True],
-        # ['swiss-parliament', None, 'segments', True],
-        # ['argentinian-parliament', None, 'segments', True],
-        # ['austrian-parliament', None, 'segments', True],
-        # #'german', --> bundestag
-        # ['brazilian-parliament', None, 'segments', True],
-        # ['mexican-parliament', None, 'segments', True],
-        # ['portuguese-parliament', None, 'segments', True],
-        # ['spanish-parliament', None, 'segments', True],
-        # ['chinese-vocal-emotions-liu-pell', None, 'emotion.categories.desired', False],
-        # peoples-speech slow
-        # ['peoples-speech', None, 'train-initial', False]
+        # ['crema-d', '1.1.1', 'emotion.voice.test', False],
+        ['emodb', '1.2.0', 'emotion.categories.train.gold_standard', False],
     ]
 
     output_list = []
@@ -64,65 +58,38 @@ def load_speech(split=None):
     output_list += [f for f in a.index]  # use file (no timedeltas)
     return output_list
 
-
-
-
-
-# Generate 77 wavs
-
-
-
-
-with open('voices.json', 'r') as f:
-    df = json.load(f)['voices']
-voice_names = [v['voice'] for k, v in df.items()]
-synthetic_wav_paths = []
-synthetic_wav_paths_AFFECT = []
-for voice in voice_names:
-
-    synthetic_wav_paths.append(
-        'assets/wavs/style_vector/' + voice.replace('/', '_').replace('#', '_').replace(
-            'cmu-arctic', 'cmu_arctic').replace('_low', '') + '.wav')
-    synthetic_wav_paths_AFFECT.append(
-        'assets/wavs/style_vector_v2/' + voice.replace('/', '_').replace('#', '_').replace(
-            'cmu-arctic', 'cmu_arctic').replace('_low', '') + '.wav')
-
-
-print(len(synthetic_wav_paths))
-
-
-natural_wav_paths = load_speech()
 
 
 # SYNTHESIZE mimic mimicx4 crema-d
 import msinference
-
+prompt_paths = {}
 
 with open('harvard.json', 'r') as f:
     harvard_individual_sentences = json.load(f)['sentences']
 
-
-
-
-for audio_prompt in ['mimic3', 'mimic3_speed', 'human']:
+for audio_prompt in [#'mimic3_english',
+                     #'mimic3_english_4x',
+                     'human',
+                     'mimic3_foreign',
+                     'mimic3_foreign_4x']:
+
+    if audio_prompt == 'human':
+        prompt_paths = load_human_speech()  # better emodb ?
+    else:
+        prompt_dir = '/data/dkounadis/artificial-styletts2/' + audio_prompt + '/'
+        prompt_paths = [prompt_dir + f for f in os.listdir(prompt_dir)]
+    prompt_paths = prompt_paths[:10]
+    print(prompt_paths, '\n\n__________')
 
     total_audio = []
    ix = 0
-    for list_of_10 in harvard_individual_sentences:
+    for list_of_10 in harvard_individual_sentences[:1]:
         # long_sentence = ' '.join(list_of_10['sentences'])
         # harvard.append(long_sentence.replace('.', ' '))
         for text in list_of_10['sentences']:
-
-            if audio_prompt == 'mimic3':
-                style_vec = msinference.compute_style(
-                    synthetic_wav_paths[ix % 134])
-            elif audio_prompt == 'mimic3_speed':
-                style_vec = msinference.compute_style(
-                    synthetic_wav_paths_AFFECT[ix % 134])
-            elif audio_prompt == 'human':
-                style_vec = msinference.compute_style(
-                    natural_wav_paths[ix % len(natural_wav_paths)])
-            else:
-                print('unknonw list of style vecto')
+            style_vec = msinference.compute_style(prompt_paths[ix % len(prompt_paths)])
             print(ix, text)
             ix += 1
             x = msinference.inference(text,
@@ -133,10 +100,6 @@ for audio_prompt in ['mimic3', 'mimic3_speed', 'human']:
                                      embedding_scale=1)
 
             total_audio.append(x)
-
-
-
-    # -- for 77x lists
-    total_audio = np.concatenate(total_audio)
-    soundfile.write(f'{audio_prompt}_770.wav', total_audio, 24000)
-    print(f'{audio_prompt}_full_770.wav')
+    total_audio = np.concatenate(total_audio)  # -- concat 77x lists
+    soundfile.write(f'{audio_prompt}_767_5.wav', total_audio, 24000)
+    print(f'{audio_prompt}_767_5.wav')
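Each Harvard sentence now takes its style from the prompt list round-robin, `prompt_paths[ix % len(prompt_paths)]`, instead of the old per-corpus if/elif chain. For orientation, this is the shape `harvard.json` must have, inferred from the loops above rather than checked against the file itself:

```python
# Inferred shape of harvard.json - 77 Harvard lists of ~10 sentences each:
# {"sentences": [{"sentences": ["The birch canoe slid on the smooth planks.", ...]}, ...]}
import json

with open('harvard.json', 'r') as f:
    lists = json.load(f)['sentences']
print(len(lists), sum(len(l['sentences']) for l in lists))  # expect 77 lists, 767 sentences total
```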
visualize_tts_plesantness.py
CHANGED
@@ -81,12 +81,12 @@ def _sigmoid(x):
 
 # for mimic3/mimic3speed/human - concat all 77 and run timeseries with 7s hop 3s
 for long_audio in [
-
-
-
-
-
-
+    'mimic3_english_767_5.wav',
+    'mimic3_english_4x_767_5.wav',
+    'human_767_5.wav',
+    'mimic3_foreign_767_5.wav',
+    'mimic3_foreign_4x_767_5.wav'
+    ]:
     file_interface = f'timeseries_{long_audio.replace("/", "")}.pkl'
     if not os.path.exists(file_interface):