Update app.py
app.py
CHANGED
@@ -0,0 +1,51 @@
+import torch
+import gradio as gr
+import librosa
+from transformers import Wav2Vec2FeatureExtractor, HubertForSequenceClassification
+
+# Load the SUPERB emotion recognition checkpoint and its feature extractor once at startup.
+feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-large-superb-er")
+model = HubertForSequenceClassification.from_pretrained("superb/hubert-large-superb-er")
+
+def get_emotion(microphone, file_upload):
+    warn_output = ""
+
+    if (microphone is not None) and (file_upload is not None):
+        warn_output = (
+            "WARNING: You've uploaded an audio file and used the microphone. "
+            "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
+        )
+    elif (microphone is None) and (file_upload is None):
+        return "ERROR: You have to either use the microphone or upload an audio file"
+
+    file = microphone if microphone is not None else file_upload
+
+    # Decode the audio at the 16 kHz sampling rate the model expects.
+    speech, _ = librosa.load(file, sr=16000, mono=True)
+    inputs = feature_extractor(speech, sampling_rate=16000, padding=True, return_tensors="pt")
+    with torch.no_grad():
+        logits = model(**inputs).logits
+    predicted_ids = torch.argmax(logits, dim=-1)
+    labels = [model.config.id2label[_id] for _id in predicted_ids.tolist()]
+    return warn_output + ", ".join(labels)
+
+demo = gr.Blocks()
+
+mf_transcribe = gr.Interface(
+    fn=get_emotion,
+    inputs=[
+        gr.inputs.Audio(source="microphone", type="filepath", optional=True),
+        gr.inputs.Audio(source="upload", type="filepath", optional=True),
+    ],
+    outputs="text",
+    layout="horizontal",
+    theme="huggingface",
+    title="AER",
+    description="get the emotion",
+    allow_flagging="never",
+)
+
+with demo:
+    gr.TabbedInterface([mf_transcribe], ["Transcribe"])
+
+demo.launch(enable_queue=True)
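Note (not part of this commit): app.py imports torch, transformers, librosa, and gradio, so the Space needs those packages available at build time. A minimal sketch of a matching requirements.txt, assuming the Space does not already provide one; versions are left unpinned because the commit states none:

torch
transformers
librosa
gradio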