import gradio as gr
import torch
from model import ECAPA_gender
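
# Instantiate the ECAPA-TDNN-based gender classifier (C sets the channel
# width of the convolutional layers) and load the pretrained weights on CPU.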
model = ECAPA_gender({"C": 1024})
model.load_state_dict(torch.load("gender_classifier.model", map_location="cpu"))
model.eval()  # inference mode: disables dropout, uses running BatchNorm stats


def predict_gender(filepath):
    # Run the classifier on the uploaded audio file without tracking gradients.
    with torch.no_grad():
        output = model.predict(filepath)
    return output
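

# Gradio UI: a single audio input, passed to predict_gender as a file path,
# with the model's prediction rendered as text.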
audio_component = gr.Audio(type="filepath", label="Upload your audio file here")
demo = gr.Interface(fn=predict_gender, inputs=audio_component, outputs="text")
demo.launch()