from transformers import pipeline
from datasets import load_dataset
import soundfile as sf
import torch

# Load the SpeechT5 text-to-speech pipeline once at import time.
synthesiser = pipeline("text-to-speech", "microsoft/speecht5_tts")

def text_to_audio(text):
    # Clean the response and truncate it to 590 characters to keep the input short.
    text_clean = text.replace('\n', '').replace('*', '')
    text_truncated = text_clean[:590]
    # Get speaker embeddings; you can replace this embedding with your own as well.
    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    speaker_embedding = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
    speech = synthesiser(text_truncated, forward_params={"speaker_embeddings": speaker_embedding})
    # Write the generated waveform to disk, then return it as raw bytes.
    sf.write("output.wav", speech["audio"], samplerate=speech["sampling_rate"])
    with open("output.wav", "rb") as audio_file:
        audio_bytes = audio_file.read()
    return audio_bytes
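
# A minimal usage sketch (illustrative, not part of the original listing):
# call text_to_audio() on a short string and persist the returned bytes.
# The example sentence and the filename "demo.wav" are hypothetical.
if __name__ == "__main__":
    audio_bytes = text_to_audio("SpeechT5 turns this sentence into spoken audio.")
    with open("demo.wav", "wb") as f:
        f.write(audio_bytes)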