Spaces:
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,13 +1,5 @@
|
|
1 |
import os
|
2 |
-
os.system("pip install gradio==3.3")
|
3 |
import gradio as gr
|
4 |
-
import numpy as np
|
5 |
-
|
6 |
-
title = "SpeechMatrix Speech-to-speech Translation"
|
7 |
-
|
8 |
-
description = "Gradio Demo for SpeechMatrix. To use it, simply record your audio, or click the example to load. Read more at the links below. \nNote: These models are trained on SpeechMatrix data only, and meant to serve as a baseline for future research."
|
9 |
-
|
10 |
-
article = "<p style='text-align: center'><a href='https://research.facebook.com/publications/speechmatrix' target='_blank'>SpeechMatrix</a> | <a href='https://github.com/facebookresearch/fairseq/tree/ust' target='_blank'>Github Repo</a></p>"
|
11 |
|
12 |
SRC_LIST = ['cs', 'de', 'en', 'es', 'et', 'fi', 'fr', 'hr', 'hu', 'it', 'nl', 'pl', 'pt', 'ro', 'sk', 'sl']
|
13 |
TGT_LIST = ['en', 'fr', 'es']
|
@@ -16,21 +8,17 @@ for src in SRC_LIST:
|
|
16 |
for tgt in TGT_LIST:
|
17 |
if src != tgt:
|
18 |
MODEL_LIST.append(f"textless_sm_{src}_{tgt}")
|
19 |
-
|
20 |
examples = []
|
21 |
|
22 |
-
io_dict = {model: gr.
|
23 |
-
|
24 |
def inference(audio, model):
|
25 |
-
out_audio = io_dict[model](audio)
|
26 |
-
return out_audio
|
|
|
27 |
gr.Interface(
|
28 |
inference,
|
29 |
-
[gr.
|
30 |
],
|
31 |
-
gr.
|
32 |
-
article=article,
|
33 |
-
title=title,
|
34 |
-
examples=examples,
|
35 |
-
cache_examples=False,
|
36 |
-
description=description).queue().launch()
|
|
|
1 |
import os
|
|
|
2 |
import gradio as gr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
|
4 |
# Source languages covered by the SpeechMatrix baseline models.
SRC_LIST = ['cs', 'de', 'en', 'es', 'et', 'fi', 'fr', 'hr', 'hu', 'it', 'nl', 'pl', 'pt', 'ro', 'sk', 'sl']
# Target languages with released speech-to-speech checkpoints.
TGT_LIST = ['en', 'fr', 'es']

# Enumerate every released translation direction. Identity pairs
# (e.g. en->en) have no checkpoint, so they are skipped.
MODEL_LIST = []
for src in SRC_LIST:
    for tgt in TGT_LIST:
        if src != tgt:
            MODEL_LIST.append(f"textless_sm_{src}_{tgt}")
|
11 |
+
|
12 |
# No cached example inputs are shipped with the demo.
examples = []

# One remotely hosted inference endpoint per model, keyed by model name.
# Each entry is a callable proxy for the facebook/<model> Space/model on the Hub.
io_dict = {m: gr.load(f"huggingface/facebook/{m}") for m in MODEL_LIST}
|
15 |
+
|
16 |
def inference(audio, model):
    """Translate the recorded clip with the selected SpeechMatrix model.

    audio: filepath of the recorded input (gr.Audio is configured with
        type="filepath" at the call site).
    model: a MODEL_LIST entry naming a textless_sm_{src}_{tgt} endpoint.
    Returns whatever audio payload the remote endpoint produces.
    """
    return io_dict[model](audio)
|
19 |
+
|
20 |
# Demo UI: record from the microphone, pick a translation direction,
# and play back the translated speech.
demo_inputs = [
    gr.Audio(source="microphone", type="filepath", label="Input"),
    gr.Dropdown(choices=MODEL_LIST, type="value", label="Model"),
]
demo_output = gr.Audio(label="Output")
demo = gr.Interface(
    inference,
    demo_inputs,
    demo_output,
)
demo.queue().launch()
|
|
|
|
|
|
|
|
|
|