import gradio as gr
import librosa
import numpy as np
import torch
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
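# Fine-tuned Italian SpeechT5 checkpoint (VoxPopuli) plus its text processor,
# and the pretrained HiFi-GAN vocoder that turns the predicted spectrogram
# into a waveform.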
checkpoint = "burraco135/speecht5_finetuned_voxpopuli_it"
processor = SpeechT5Processor.from_pretrained(checkpoint)
model = SpeechT5ForTextToSpeech.from_pretrained(checkpoint)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
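# Text-to-speech for a single, fixed voice: the Gradio interface below passes
# only the text box, and the speaker identity comes from a precomputed
# embedding file bundled with the Space (assumed to be a 512-dim x-vector,
# the format SpeechT5 expects).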
def predict(text):
    # Fixed speaker embedding shipped with the Space.
    speaker_embedding = np.load("speaker_0_embeddings.npy")

    inputs = processor(text=text, return_tensors="pt")

    # Limit input length to the model's maximum number of text positions.
    input_ids = inputs["input_ids"]
    input_ids = input_ids[..., :model.config.max_text_positions]

    speaker_embedding = torch.tensor(speaker_embedding).unsqueeze(0)
    speech = model.generate_speech(input_ids, speaker_embedding, vocoder=vocoder)

    # Convert the float waveform to 16-bit PCM for Gradio's numpy audio output.
    speech = (speech.numpy() * 32767).astype(np.int16)
    return (16000, speech)
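# Minimal Gradio UI: one text box in, 16 kHz numpy audio out.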
gr.Interface(
    fn=predict,
    inputs=[
        gr.Text(label="Input Text"),
    ],
    outputs=[
        gr.Audio(label="Generated Speech", type="numpy"),
    ],
).launch()
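# Note: speaker_0_embeddings.npy must exist next to this file. It is assumed
# to hold a single 512-dim x-vector; one common way to create such a file
# (a sketch, not part of this Space) is SpeechBrain's x-vector encoder applied
# to a 16 kHz reference clip (here `waveform_16khz`, a 1-D float array):
#
#   from speechbrain.pretrained import EncoderClassifier
#   encoder = EncoderClassifier.from_hparams("speechbrain/spkrec-xvect-voxceleb")
#   with torch.no_grad():
#       emb = encoder.encode_batch(torch.tensor(waveform_16khz).unsqueeze(0))
#   np.save("speaker_0_embeddings.npy", emb.squeeze().cpu().numpy())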