import os
import datasets
import pandas as pd
from datasets.tasks import AudioClassification
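# NOTE: `datasets.tasks` and the `task_templates` argument below only exist in
# older `datasets` releases; they were deprecated and later removed.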


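# Label name lists: 6 song splits, each covering the same 22 singers.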
_NAMES = {
    "songs": [f"song{i}" for i in range(1, 7)],
    "singers": [f"singer{i}" for i in range(1, 23)],
}

_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic-database/{os.path.basename(__file__)[:-3]}"

_DOMAIN = f"{_HOMEPAGE}/resolve/master/data"

_URLS = {
    "audio": f"{_DOMAIN}/audio.zip",
    "mel": f"{_DOMAIN}/mel.zip",
}


class acapella(datasets.GeneratorBasedBuilder):
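    """CCMusic acapella evaluation dataset builder: 48 kHz vocal recordings, mel
    spectrograms, and nine per-singer evaluation scores for 6 songs x 22 singers."""
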
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=48000),
                    "mel": datasets.Image(),
                    "singer_id": datasets.features.ClassLabel(names=_NAMES["singers"]),
                    "pitch": datasets.Value("float32"),
                    "rhythm": datasets.Value("float32"),
                    "vocal_range": datasets.Value("float32"),
                    "timbre": datasets.Value("float32"),
                    "pronunciation": datasets.Value("float32"),
                    "vibrato": datasets.Value("float32"),
                    "dynamic": datasets.Value("float32"),
                    "breath_control": datasets.Value("float32"),
                    "overall_performance": datasets.Value("float32"),
                }
            ),
            supervised_keys=("audio", "singer_id"),
            homepage=_HOMEPAGE,
            license="CC-BY-NC-ND",
            version="1.2.0",
            task_templates=[
                AudioClassification(
                    task="audio-classification",
                    audio_column="audio",
                    label_column="singer_id",
                )
            ],
        )

    def _split_generators(self, dl_manager):
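        # Download each song's CSV of evaluation scores (one row per singer)
        # and collect them as a list of score dicts per song, ordered by singer.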
        songs = {}
        for index in _NAMES["songs"]:
            csv_file = dl_manager.download(f"{_DOMAIN}/{index}.csv")
            song_eval = pd.read_csv(csv_file, index_col="singer_id")
            scores = []
            for i in range(len(_NAMES["singers"])):
                scores.append(
                    {
                        "pitch": song_eval.iloc[i]["pitch"],
                        "rhythm": song_eval.iloc[i]["rhythm"],
                        "vocal_range": song_eval.iloc[i]["vocal_range"],
                        "timbre": song_eval.iloc[i]["timbre"],
                        "pronunciation": song_eval.iloc[i]["pronunciation"],
                        "vibrato": song_eval.iloc[i]["vibrato"],
                        "dynamic": song_eval.iloc[i]["dynamic"],
                        "breath_control": song_eval.iloc[i]["breath_control"],
                        "overall_performance": song_eval.iloc[i]["overall_performance"],
                    }
                )

            songs[index] = scores

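        # Download and extract the audio archive, then attach each .wav path to
        # its (song, singer) slot: the parent directory is the song id and the
        # 1-based singer number is the value in parentheses in the file name.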
        audio_files = dl_manager.download_and_extract(_URLS["audio"])
        for fpath in dl_manager.iter_files([audio_files]):
            fname: str = os.path.basename(fpath)
            if fname.endswith(".wav"):
                song_id = os.path.basename(os.path.dirname(fpath))
                singer_id = int(fname.split("(")[1].split(")")[0]) - 1
                songs[song_id][singer_id]["audio"] = fpath

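        # Attach the mel-spectrogram .jpg paths using the same naming convention.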
        mel_files = dl_manager.download_and_extract(_URLS["mel"])
        for fpath in dl_manager.iter_files([mel_files]):
            fname = os.path.basename(fpath)
            if fname.endswith(".jpg"):
                song_id = os.path.basename(os.path.dirname(fpath))
                singer_id = int(fname.split("(")[1].split(")")[0]) - 1
                songs[song_id][singer_id]["mel"] = fpath

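        # Emit one split per song; every split covers all 22 singers.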
        split_generator = []
        for key in songs.keys():
            split_generator.append(
                datasets.SplitGenerator(
                    name=key,
                    gen_kwargs={"files": songs[key]},
                )
            )

        return split_generator

    def _generate_examples(self, files):
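        # `files` is ordered by singer index, so the enumeration index also
        # serves as the ClassLabel id for "singer_id".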
        for i, item in enumerate(files):
            yield i, {
                "audio": item["audio"],
                "mel": item["mel"],
                "singer_id": i,
                "pitch": item["pitch"],
                "rhythm": item["rhythm"],
                "vocal_range": item["vocal_range"],
                "timbre": item["timbre"],
                "pronunciation": item["pronunciation"],
                "vibrato": item["vibrato"],
                "dynamic": item["dynamic"],
                "breath_control": item["breath_control"],
                "overall_performance": item["overall_performance"],
            }
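

# ---------------------------------------------------------------------------
# Usage sketch (assumptions: this script is published as the dataset repo
# "ccmusic-database/acapella", and the installed `datasets` version still
# supports script-based repos; recent versions may require
# `trust_remote_code=True` or may not load scripts at all):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("ccmusic-database/acapella", split="song1",
#                       trust_remote_code=True)
#     sample = ds[0]
#     print(sample["singer_id"], sample["overall_performance"])
#
# Each split ("song1" ... "song6") yields one example per singer (22 in total).
# ---------------------------------------------------------------------------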