Datasets:

Size:
n<1K
ArXiv:
License:
admin committed on
Commit
d8599ba
1 Parent(s): 43d0356
Files changed (3) hide show
  1. .gitignore +1 -0
  2. GZ_IsoTech.py +114 -0
  3. README.md +150 -1
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ rename.sh
GZ_IsoTech.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import random
3
+ import hashlib
4
+ import datasets
5
+ from datasets.tasks import AudioClassification
6
+
7
# Dataset name derived from this script's filename (e.g. "GZ_IsoTech").
_DBNAME = os.path.basename(__file__).split(".")[0]

# ModelScope repo API endpoint serving the raw data files of this dataset.
_DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic-database/{_DBNAME}/repo?Revision=master&FilePath=data"

# Public homepage of the dataset on ModelScope.
_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic-database/{_DBNAME}"

# Guzheng playing-technique label -> [Chinese name, pinyin transliteration].
# The dict keys double as the ClassLabel names (insertion order matters).
_NAMES = {
    "vibrato": ["颤音", "chan4_yin1"],
    "upward_portamento": ["上滑音", "shang4_hua2_yin1"],
    "downward_portamento": ["下滑音", "xia4_hua2_yin1"],
    "returning_portamento": ["回滑音", "hui2_hua2_yin1"],
    "glissando": ["刮奏, 花指", "gua1_zou4/hua1_zhi3"],
    "tremolo": ["摇指", "yao2_zhi3"],
    "harmonics": ["泛音", "fan4_yin1"],
    "plucks": ["勾, 打, 抹, 托, ...", "gou1/da3/mo3/tuo1/etc"],
}

# Download URLs: audio clips (.wav) and their mel-spectrogram images (.jpg).
_URLS = {
    "audio": f"{_DOMAIN}/audio.zip",
    "mel": f"{_DOMAIN}/mel.zip",
}
29
+
30
+
31
class GZ_IsoTech(datasets.GeneratorBasedBuilder):
    """Builder for the GZ_IsoTech guzheng playing-technique dataset.

    Each example pairs a 44.1 kHz audio clip with its mel-spectrogram image
    and an 8-class technique label; `cname`/`pinyin` carry the Chinese name
    and its transliteration from the module-level `_NAMES` table.
    """

    def _info(self):
        """Describe the features, supervised keys and metadata of the dataset."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=44100),
                    "mel": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=list(_NAMES.keys())),
                    "cname": datasets.Value("string"),
                    "pinyin": datasets.Value("string"),
                }
            ),
            supervised_keys=("audio", "label"),
            homepage=_HOMEPAGE,
            license="CC-BY-NC-ND",
            version="1.2.0",
            task_templates=[
                AudioClassification(
                    task="audio-classification",
                    audio_column="audio",
                    label_column="label",
                )
            ],
        )

    def _str2md5(self, original_string: str) -> str:
        """Return the hex MD5 digest of *original_string* (used as a join key)."""
        md5_obj = hashlib.md5()
        md5_obj.update(original_string.encode("utf-8"))
        return md5_obj.hexdigest()

    def _split_generators(self, dl_manager):
        """Download both archives, pair audio with its spectrogram, and split.

        Files live under ``<split>/<class>/<name>.<ext>``; audio and mel are
        joined on md5("<split>/<class>/<stem>") so the pairing survives the
        differing extensions.
        """
        audio_files = dl_manager.download_and_extract(_URLS["audio"])
        mel_files = dl_manager.download_and_extract(_URLS["mel"])
        train_files, test_files = {}, {}

        # Pass 1: index every audio clip by its split/class/stem key.
        for path in dl_manager.iter_files([audio_files]):
            fname: str = os.path.basename(path)
            if not fname.endswith(".wav"):
                continue
            dirname = os.path.dirname(path)
            splt = os.path.basename(os.path.dirname(dirname))
            # splitext is safer than splitting on ".wa" for odd filenames.
            stem = os.path.splitext(fname)[0]
            item_id = self._str2md5(f"{splt}/{os.path.basename(dirname)}/{stem}")
            target = train_files if splt == "train" else test_files
            target[item_id] = {"audio": path}

        # Pass 2: attach the matching mel spectrogram to each audio entry.
        for path in dl_manager.iter_files([mel_files]):
            fname = os.path.basename(path)
            if not fname.endswith(".jpg"):
                continue
            dirname = os.path.dirname(path)
            splt = os.path.basename(os.path.dirname(dirname))
            stem = os.path.splitext(fname)[0]
            item_id = self._str2md5(f"{splt}/{os.path.basename(dirname)}/{stem}")
            target = train_files if splt == "train" else test_files
            # Skip orphan spectrograms instead of raising KeyError.
            if item_id in target:
                target[item_id]["mel"] = path

        trainset = list(train_files.values())
        testset = list(test_files.values())
        random.shuffle(trainset)
        random.shuffle(testset)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": trainset},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": testset},
            ),
        ]

    def _generate_examples(self, files):
        """Yield ``(key, example)`` pairs; the label is the clip's parent folder."""
        for i, item in enumerate(files):
            technique = os.path.basename(os.path.dirname(item["audio"]))
            yield i, {
                "audio": item["audio"],
                # None when no spectrogram was paired with this clip.
                "mel": item.get("mel"),
                "label": technique,
                "cname": _NAMES[technique][0],
                "pinyin": _NAMES[technique][1],
            }
README.md CHANGED
@@ -1,3 +1,152 @@
1
  ---
2
- license: mit
 
 
 
 
 
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ license: cc-by-nc-nd-4.0
3
+ task_categories:
4
+ - audio-classification
5
+ language:
6
+ - zh
7
+ - en
8
+ tags:
9
+ - music
10
+ - art
11
+ pretty_name: GZ_IsoTech Dataset
12
+ size_categories:
13
+ - n<1K
14
+ viewer: false
15
  ---
16
+
17
+ # Dataset Card for GZ_IsoTech Dataset
18
+ The raw dataset, sourced from [GZ_IsoTech](https://ccmusic-database.github.io/en/database/csmtd.html#GZTech), comprises 2,824 audio clips showcasing various guzheng playing techniques. Specifically, 2,328 clips were sourced from virtual sound banks, while 496 clips were performed by a skilled professional guzheng artist. These recordings encompass a comprehensive range of tones inherent to the guzheng instrument. Categorization of the clips is based on the diverse playing techniques characteristic of the guzheng; the clips are divided into eight categories: Vibrato (chanyin), Upward Portamento (shanghuayin), Downward Portamento (xiahuayin), Returning Portamento (huihuayin), Glissando (guazou, huazhi), Tremolo (yaozhi), Harmonic (fanyin), Plucks (gou, da, mo, tuo…).
19
+
20
+ Based on the aforementioned raw dataset, we conducted data processing to construct the `default subset` of the current integrated version of the dataset. Due to the pre-existing split in the raw dataset, wherein the data has been partitioned approximately in a 4:1 ratio for training and testing sets, we uphold the original data division approach for the default subset. The data structure of the default subset can be viewed in the [viewer](https://www.modelscope.cn/datasets/ccmusic-database/GZ_IsoTech/dataPeview). The `eval subset` was not further constructed as the original dataset had already been cited and used in published articles.
21
+
22
+ ## Viewer
23
+ <https://www.modelscope.cn/datasets/ccmusic-database/GZ_IsoTech/dataPreview>
24
+
25
+ ## Dataset Structure
26
+ <style>
27
+ .datastructure td {
28
+ vertical-align: middle !important;
29
+ text-align: center;
30
+ }
31
+ .datastructure th {
32
+ text-align: center;
33
+ }
34
+ </style>
35
+ <table class="datastructure">
36
+ <tr>
37
+ <th>audio</th>
38
+ <th>mel</th>
39
+ <th>label</th>
40
+ <th>cname</th>
41
+ </tr>
42
+ <tr>
43
+ <td>.wav, 44100Hz</td>
44
+ <td>.jpg, mel spectrogram</td>
45
+ <td>8-class</td>
46
+ <td>string</td>
47
+ </tr>
48
+ <tr>
49
+ <td>...</td>
50
+ <td>...</td>
51
+ <td>...</td>
52
+ <td>...</td>
53
+ </tr>
54
+ </table>
55
+
56
+ ### Data Instances
57
+ .zip(.wav, .jpg)
58
+
59
+ ### Data Fields
60
+ Categorization of the clips is based on the diverse playing techniques characteristic of the guzheng, the clips are divided into eight categories: Vibrato (chanyin), Upward Portamento (shanghuayin), Downward Portamento (xiahuayin), Returning Portamento (huihuayin), Glissando (guazou, huazhi), Tremolo (yaozhi), Harmonic (fanyin), Plucks (gou, da, mo, tuo…).
61
+ <img src="https://www.modelscope.cn/api/v1/datasets/ccmusic-database/GZ_IsoTech/repo?Revision=master&FilePath=.%2Fdata%2Fiso.png&View=true">
62
+
63
+ ### Data Splits
64
+ train, test
65
+
66
+ ## Dataset Description
67
+ - **Homepage:** <https://ccmusic-database.github.io>
68
+ - **Repository:** <https://huggingface.co/datasets/ccmusic-database/Guzheng_Tech99>
69
+ - **Paper:** <https://doi.org/10.5281/zenodo.5676893>
70
+ - **Leaderboard:** <https://www.modelscope.cn/datasets/ccmusic-database/GZ_IsoTech>
71
+ - **Point of Contact:** <https://arxiv.org/abs/2209.08774>
72
+
73
+ ### Dataset Summary
74
+ Due to the pre-existing split in the raw dataset, wherein the data has been partitioned approximately in a 4:1 ratio for training and testing sets, we uphold the original data division approach. In contrast to utilizing platform-specific automated splitting mechanisms, we directly employ the pre-split data for subsequent integration steps.
75
+
76
+ ### Supported Tasks and Leaderboards
77
+ MIR, audio classification
78
+
79
+ ### Languages
80
+ Chinese, English
81
+
82
+ ## Usage
83
+ ```python
84
+ from datasets import load_dataset
85
+
86
+ ds = load_dataset("ccmusic-database/GZ_IsoTech")
87
+ for item in ds["train"]:
88
+ print(item)
89
+
90
+ for item in ds["test"]:
91
+ print(item)
92
+ ```
93
+
94
+ ## Maintenance
95
+ ```bash
96
+ GIT_LFS_SKIP_SMUDGE=1 git clone [email protected]:datasets/ccmusic-database/GZ_IsoTech
97
+ cd GZ_IsoTech
98
+ ```
99
+
100
+ ## Dataset Creation
101
+ ### Curation Rationale
102
+ The Guzheng is a kind of traditional Chinese instrument with diverse playing techniques. Instrument playing techniques (IPT) play an important role in musical performance. However, most of the existing works for IPT detection show low efficiency for variable-length audio and do not assure generalization as they rely on a single sound bank for training and testing. In this study, we propose an end-to-end Guzheng playing technique detection system using Fully Convolutional Networks that can be applied to variable-length audio. Because each Guzheng playing technique is applied to a note, a dedicated onset detector is trained to divide an audio into several notes and its predictions are fused with frame-wise IPT predictions. During fusion, we add the IPT predictions frame by frame inside each note and get the IPT with the highest probability within each note as the final output of that note. We create a new dataset named GZ_IsoTech from multiple sound banks and real-world recordings for Guzheng performance analysis. Our approach achieves 87.97% in frame-level accuracy and 80.76% in note-level F1 score, outperforming existing works by a large margin, which indicates the effectiveness of our proposed method in IPT detection.
103
+
104
+ ### Source Data
105
+ #### Initial Data Collection and Normalization
106
+ Dichucheng Li, Monan Zhou
107
+
108
+ #### Who are the source language producers?
109
+ Students from FD-LAMT
110
+
111
+ ### Annotations
112
+ #### Annotation process
113
+ This database contains 2824 audio clips of guzheng playing techniques. Among them, 2328 pieces were collected from virtual sound banks, and 496 pieces were played and recorded by a professional guzheng performer.
114
+
115
+ #### Who are the annotators?
116
+ Students from FD-LAMT
117
+
118
+ ### Personal and Sensitive Information
119
+ None
120
+
121
+ ## Considerations for Using the Data
122
+ ### Social Impact of Dataset
123
+ Promoting the development of the music AI industry
124
+
125
+ ### Discussion of Biases
126
+ Only for Traditional Chinese Instruments
127
+
128
+ ### Other Known Limitations
129
+ Insufficient sample
130
+
131
+ ## Additional Information
132
+ ### Dataset Curators
133
+ Dichucheng Li
134
+
135
+ ### Evaluation
136
+ [Li, Dichucheng, Yulun Wu, Qinyu Li, Jiahao Zhao, Yi Yu, Fan Xia and Wei Li. “Playing Technique Detection by Fusing Note Onset Information in Guzheng Performance.” International Society for Music Information Retrieval Conference (2022).](https://archives.ismir.net/ismir2022/paper/000037.pdf)
137
+
138
+ ### Citation Information
139
+ ```bibtex
140
+ @dataset{zhaorui_liu_2021_5676893,
141
+ author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
142
+ title = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
143
+ month = {mar},
144
+ year = {2024},
145
+ publisher = {HuggingFace},
146
+ version = {1.2},
147
+ url = {https://huggingface.co/ccmusic-database}
148
+ }
149
+ ```
150
+
151
+ ### Contributions
152
+ Promoting the development of the music AI industry