import torch
import gradio as gr
from transformers import Wav2Vec2FeatureExtractor
import librosa
def get_emotion(microphone, file_upload):
    # Feature extractor matching the SUPERB emotion-recognition checkpoint.
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("superb/hubert-large-superb-er")

    warn_output = ""
    if (microphone is not None) and (file_upload is not None):
        warn_output = (
            "WARNING: You've uploaded an audio file and used the microphone. "
            "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
        )
    elif (microphone is None) and (file_upload is None):
        return "ERROR: You have to either use the microphone or upload an audio file"

    # Load the fine-tuned emotion-recognition model (serialized with torch.save).
    model = torch.load("model.pth", map_location=torch.device("cpu"))
    model.eval()

    file = microphone if microphone is not None else file_upload
    speech, _ = librosa.load(file, sr=16000, mono=True)

    inputs = feature_extractor(speech, sampling_rate=16000, padding=True, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    labels = [model.config.id2label[_id] for _id in predicted_ids.tolist()]

    return warn_output + ", ".join(labels)
demo = gr.Blocks()
mf_transcribe = gr.Interface(
    fn=get_emotion,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath", optional=True),
        gr.inputs.Audio(source="upload", type="filepath", optional=True),
    ],
    outputs="text",
    layout="horizontal",
    theme="huggingface",
    title="AER",
    description="Record or upload an audio clip to recognize its emotion.",
    allow_flagging="never",
)
with demo:
    gr.TabbedInterface([mf_transcribe], ["Transcribe"])

demo.launch(enable_queue=True)
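
# The app expects a full model object serialized to "model.pth" via torch.save().
# A minimal sketch of how such a checkpoint could be produced from the same SUPERB
# emotion-recognition checkpoint is shown below (commented out so it does not run
# as part of the app); the HubertForSequenceClassification class and model id are
# assumptions inferred from the feature extractor used above.
#
# from transformers import HubertForSequenceClassification
#
# model = HubertForSequenceClassification.from_pretrained("superb/hubert-large-superb-er")
# torch.save(model, "model.pth")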