import gradio as gr
import torch
from transformers import pipeline
from timeit import default_timer as timer

username = "fmagot01"  ## Complete your username
model_id = f"{username}/distilhubert-finetuned-gtzan"

# Run on GPU when available, otherwise fall back to CPU
device = "cuda:0" if torch.cuda.is_available() else "cpu"
pipe = pipeline("audio-classification", model=model_id, device=device)


# def predict_trunc(filepath):
#     preprocessed = pipe.preprocess(filepath)
#     truncated = pipe.feature_extractor.pad(preprocessed, truncation=True, max_length=16_000 * 30)
#     model_outputs = pipe.forward(truncated)
#     outputs = pipe.postprocess(model_outputs)
#     return outputs


def classify_audio(filepath):
    """
    Goes from
    [{'score': 0.8339303731918335, 'label': 'country'},
     {'score': 0.11914275586605072, 'label': 'rock'}]
    to
    {"country": 0.8339303731918335, "rock": 0.11914275586605072}
    """
    start_time = timer()
    preds = pipe(filepath)
    # preds = predict_trunc(filepath)
    outputs = {}
    pred_time = round(timer() - start_time, 5)
    for p in preds:
        outputs[p["label"]] = p["score"]
    return outputs, pred_time


title = "Classifier of Music Genres"
description = """
This is the demo of the fine-tuned classification model that we just trained on the
[GTZAN](https://huggingface.co/datasets/marsyas/gtzan) dataset. You can upload your own
audio file or use the ones already provided below.
"""

# Example audio files shipped alongside the app
filenames = [
    "TAINY_88_melodic_loop_keys_las_Emin.wav",
    "TAINY_92_melodic_loop_keys_lam_Ebmin.wav",
    "TunePocket-Lively-Polka-Dance-30-Sec-Preview.mp3",
]
filenames = [[f"./{f}"] for f in filenames]

demo = gr.Interface(
    fn=classify_audio,
    inputs=gr.Audio(type="filepath"),
    outputs=[
        gr.Label(label="Predictions"),
        gr.Number(label="Prediction time (s)"),
    ],
    title=title,
    description=description,
    examples=filenames,
)

demo.launch()