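# acapella.py: HuggingFace `datasets` loading script for the ccmusic-database
# a cappella singing-evaluation dataset: six songs, each sung by 22 singers,
# with 48 kHz recordings, mel spectrogram images, and nine per-singer scores.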
import os
import datasets
import pandas as pd
from datasets.tasks import AudioClassification

# Label vocabulary: six songs, each performed by the same 22 singers.
_NAMES = {
    "songs": [f"song{i}" for i in range(1, 7)],
    "singers": [f"singer{i}" for i in range(1, 23)],
}

# The dataset name is derived from this file's name (acapella.py -> "acapella").
_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic-database/{os.path.basename(__file__)[:-3]}"
_DOMAIN = f"{_HOMEPAGE}/resolve/master/data"

# Remote archives: raw vocal recordings and pre-computed mel spectrogram images.
_URLS = {
    "audio": f"{_DOMAIN}/audio.zip",
    "mel": f"{_DOMAIN}/mel.zip",
}


class acapella(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    # One 48 kHz recording and its mel spectrogram per example.
                    "audio": datasets.Audio(sampling_rate=48000),
                    "mel": datasets.Image(),
                    "singer_id": datasets.features.ClassLabel(names=_NAMES["singers"]),
                    # Nine per-singer evaluation scores for the performance.
                    "pitch": datasets.Value("float32"),
                    "rhythm": datasets.Value("float32"),
                    "vocal_range": datasets.Value("float32"),
                    "timbre": datasets.Value("float32"),
                    "pronunciation": datasets.Value("float32"),
                    "vibrato": datasets.Value("float32"),
                    "dynamic": datasets.Value("float32"),
                    "breath_control": datasets.Value("float32"),
                    "overall_performance": datasets.Value("float32"),
                }
            ),
            supervised_keys=("audio", "singer_id"),
            homepage=_HOMEPAGE,
            license="CC-BY-NC-ND",
            version="1.2.0",
            task_templates=[
                AudioClassification(
                    task="audio-classification",
                    audio_column="audio",
                    label_column="singer_id",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        # Collect the per-song CSV score sheets, one row per singer.
        songs = {}
        for index in _NAMES["songs"]:
            csv_files = dl_manager.download(f"{_DOMAIN}/{index}.csv")
            song_eval = pd.read_csv(csv_files, index_col="singer_id")
            scores = []
            for i in range(len(_NAMES["singers"])):
                scores.append(
                    {
                        "pitch": song_eval.iloc[i]["pitch"],
                        "rhythm": song_eval.iloc[i]["rhythm"],
                        "vocal_range": song_eval.iloc[i]["vocal_range"],
                        "timbre": song_eval.iloc[i]["timbre"],
                        "pronunciation": song_eval.iloc[i]["pronunciation"],
                        "vibrato": song_eval.iloc[i]["vibrato"],
                        "dynamic": song_eval.iloc[i]["dynamic"],
                        "breath_control": song_eval.iloc[i]["breath_control"],
                        "overall_performance": song_eval.iloc[i]["overall_performance"],
                    }
                )
            songs[index] = scores

        # Attach each singer's recording: the singer index comes from the number
        # in parentheses in the file name, the parent directory names the song.
        audio_files = dl_manager.download_and_extract(_URLS["audio"])
        for fpath in dl_manager.iter_files([audio_files]):
            fname: str = os.path.basename(fpath)
            if fname.endswith(".wav"):
                song_id = os.path.basename(os.path.dirname(fpath))
                singer_id = int(fname.split("(")[1].split(")")[0]) - 1
                songs[song_id][singer_id]["audio"] = fpath

        # Attach the matching mel spectrogram images, organized the same way.
        mel_files = dl_manager.download_and_extract(_URLS["mel"])
        for fpath in dl_manager.iter_files([mel_files]):
            fname = os.path.basename(fpath)
            if fname.endswith(".jpg"):
                song_id = os.path.basename(os.path.dirname(fpath))
                singer_id = int(fname.split("(")[1].split(")")[0]) - 1
                songs[song_id][singer_id]["mel"] = fpath

        # Expose one split per song.
        split_generator = []
        for key in songs.keys():
            split_generator.append(
                datasets.SplitGenerator(
                    name=key,
                    gen_kwargs={"files": songs[key]},
                )
            )

        return split_generator

    def _generate_examples(self, files):
        # `files` is the per-song list built above, ordered by singer, so the
        # enumeration index is also the ClassLabel index of the singer.
        for i, item in enumerate(files):
            yield i, {
                "audio": item["audio"],
                "mel": item["mel"],
                "singer_id": i,
                "pitch": item["pitch"],
                "rhythm": item["rhythm"],
                "vocal_range": item["vocal_range"],
                "timbre": item["timbre"],
                "pronunciation": item["pronunciation"],
                "vibrato": item["vibrato"],
                "dynamic": item["dynamic"],
                "breath_control": item["breath_control"],
                "overall_performance": item["overall_performance"],
            }
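

# A minimal usage sketch, not part of the loading script itself. It assumes the
# script is hosted on the HuggingFace Hub under the repo id
# "ccmusic-database/acapella" (inferred from _HOMEPAGE) and that remote code
# execution is enabled; the __main__ guard keeps this from running when the
# script is imported by `load_dataset`.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("ccmusic-database/acapella", trust_remote_code=True)
    # Splits are named after the songs; each row holds one singer's recording,
    # its mel spectrogram, and the nine evaluation scores.
    first = ds["song1"][0]
    print(first["singer_id"], first["overall_performance"])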