import gradio as gr
import torch
from model import ECAPA_gender

# Instantiate the ECAPA-based gender classifier (C = channel width) and load
# the pretrained weights onto the CPU.
model = ECAPA_gender({"C": 1024})
model.load_state_dict(torch.load("gender_classifier.model", map_location="cpu"))
model.eval()  # inference mode: freezes dropout and batch-norm statistics

def predict_gender(filepath):
    # Run the classifier on the uploaded audio file; gradients are not
    # needed at inference time.
    with torch.no_grad():
        output = model.predict(filepath)
    return output

# Wire the classifier into a simple Gradio interface: an audio upload in,
# the predicted label out as text.
audio_component = gr.Audio(type="filepath", label="Upload your audio file here")
demo = gr.Interface(fn=predict_gender, inputs=audio_component, outputs="text")
demo.launch()
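
# A minimal sketch of calling the model directly, bypassing the UI
# ("sample.wav" is an illustrative placeholder, not a file in this repo):
#
#     with torch.no_grad():
#         print(model.predict("sample.wav"))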