shtif committed
Commit 1fbca42
Parent: 6bdfa24

Create app.py

Files changed (1): app.py (+64, -0)
app.py ADDED
@@ -0,0 +1,64 @@
+ import torch
+ from transformers import pipeline
+
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+ pipe = pipeline(
+     "automatic-speech-recognition", model="openai/whisper-base", device=device
+ )
+
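+ # Speech-to-text: Whisper transcribes the input audio with Italian ("it") as the output language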
+ def translate(audio):
+     outputs = pipe(audio, max_new_tokens=256, generate_kwargs={"task": "transcribe", "language": "it"})
+     return outputs["text"]
+
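+ # Text-to-speech: SpeechT5 fine-tuned on Italian VoxPopuli, decoded with the HiFi-GAN vocoder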
+ from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
+ from datasets import load_dataset
+
+ processor = SpeechT5Processor.from_pretrained("burraco135/speecht5_finetuned_voxpopuli_it")
+ model = SpeechT5ForTextToSpeech.from_pretrained("burraco135/speecht5_finetuned_voxpopuli_it")
+ vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
+
+ model.to(device)
+ vocoder.to(device)
+
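+ # A single fixed speaker x-vector from CMU Arctic (index 7306, as in the SpeechT5 examples)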
+ embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
+ speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
+
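+ # Synthesise a waveform for the given text, conditioned on the fixed speaker embedding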
+ def synthesise(text):
+     inputs = processor(text=text, return_tensors="pt")
+     speech = model.generate_speech(
+         inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder
+     )
+     return speech.cpu()
+
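+ # Scale the model's float32 waveform into the int16 range for Gradio's numpy audio output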
+ import numpy as np
+
+ target_dtype = np.int16
+ max_range = np.iinfo(target_dtype).max
+
+
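+ # End-to-end pipeline: input speech -> Italian text -> Italian speech (returned at 16 kHz)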
+ def speech_to_speech_translation(audio):
+     translated_text = translate(audio)
+     synthesised_speech = synthesise(translated_text)
+     synthesised_speech = (synthesised_speech.numpy() * max_range).astype(np.int16)
+     return 16000, synthesised_speech
+
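+ # Gradio 3.x API: gr.Audio takes source=...; later Gradio versions renamed it to sources=[...]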
+ import gradio as gr
+
+ demo = gr.Blocks()
+
+ mic_translate = gr.Interface(
+     fn=speech_to_speech_translation,
+     inputs=gr.Audio(source="microphone", type="filepath"),
+     outputs=gr.Audio(label="Generated Speech", type="numpy"),
+ )
+
+ file_translate = gr.Interface(
+     fn=speech_to_speech_translation,
+     inputs=gr.Audio(source="upload", type="filepath"),
+     outputs=gr.Audio(label="Generated Speech", type="numpy"),
+ )
+
+ with demo:
+     gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"])
+
+ demo.launch(debug=True)