HoneyTian committed
Commit 0be1a5f · 1 Parent(s): c1c1f85

first commit

Files changed (6)
  1. .gitignore +9 -0
  2. README.md +7 -0
  3. demand.py +128 -0
  4. main.py +68 -0
  5. project_settings.py +12 -0
  6. requirements.txt +3 -0
.gitignore ADDED
@@ -0,0 +1,9 @@
+
+.idea/
+.git/
+
+**/__pycache__/
+**/hub_datasets/
+
+/data/
+/dotenv/
README.md CHANGED
@@ -1,3 +1,10 @@
 ---
 license: apache-2.0
+size_categories:
+- 100M<n<1B
 ---
+## DEMAND: a collection of multi-channel recordings of acoustic noise in diverse environments
+
+DEMAND: Diverse Environments Multichannel Acoustic Noise Database
+
+https://zenodo.org/records/1227121
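
For quick reference, below is a minimal sketch of loading one DEMAND configuration with the `datasets` library. The Hub repo id `qgyd2021/demand` is an assumption taken from the docstring in main.py later in this commit; `kitchen_16k` is one of the config names defined in demand.py.

```python
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Minimal sketch: load one DEMAND configuration. The repo id is an assumption
# based on main.py's docstring; the local "demand.py" script works the same way.
from datasets import load_dataset

dataset = load_dataset(
    "qgyd2021/demand",       # assumed Hub repo id; or the local "demand.py" script
    name="kitchen_16k",      # any key from _DATA_URL_MAP in demand.py
    split="train",
    trust_remote_code=True,  # the dataset ships a Python loading script
)

sample = dataset[0]
print(sample["category"])                # e.g. "DKITCHEN"
print(sample["audio"]["sampling_rate"])  # 16000 for the *_16k configs
```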
demand.py ADDED
@@ -0,0 +1,128 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+from glob import glob
+import json
+import os
+from pathlib import Path
+
+import datasets
+
+
+_DATA_URL_MAP = {
+    "kitchen_16k": "https://zenodo.org/records/1227121/files/DKITCHEN_16k.zip?download=1",
+    "kitchen_48k": "https://zenodo.org/records/1227121/files/DKITCHEN_48k.zip?download=1",
+    "living_16k": "https://zenodo.org/records/1227121/files/DLIVING_16k.zip?download=1",
+    "living_48k": "https://zenodo.org/records/1227121/files/DLIVING_48k.zip?download=1",
+    "washing_16k": "https://zenodo.org/records/1227121/files/DWASHING_16k.zip?download=1",
+    "washing_48k": "https://zenodo.org/records/1227121/files/DWASHING_48k.zip?download=1",
+    "field_16k": "https://zenodo.org/records/1227121/files/NFIELD_16k.zip?download=1",
+    "field_48k": "https://zenodo.org/records/1227121/files/NFIELD_48k.zip?download=1",
+    "park_16k": "https://zenodo.org/records/1227121/files/NPARK_16k.zip?download=1",
+    "park_48k": "https://zenodo.org/records/1227121/files/NPARK_48k.zip?download=1",
+    "river_16k": "https://zenodo.org/records/1227121/files/NRIVER_16k.zip?download=1",
+    "river_48k": "https://zenodo.org/records/1227121/files/NRIVER_48k.zip?download=1",
+    "hallway_16k": "https://zenodo.org/records/1227121/files/OHALLWAY_16k.zip?download=1",
+    "hallway_48k": "https://zenodo.org/records/1227121/files/OHALLWAY_48k.zip?download=1",
+    "meeting_16k": "https://zenodo.org/records/1227121/files/OMEETING_16k.zip?download=1",
+    "meeting_48k": "https://zenodo.org/records/1227121/files/OMEETING_48k.zip?download=1",
+    "office_16k": "https://zenodo.org/records/1227121/files/OOFFICE_16k.zip?download=1",
+    "office_48k": "https://zenodo.org/records/1227121/files/OOFFICE_48k.zip?download=1",
+    "cafeter_16k": "https://zenodo.org/records/1227121/files/PCAFETER_16k.zip?download=1",
+    "cafeter_48k": "https://zenodo.org/records/1227121/files/PCAFETER_48k.zip?download=1",
+    "resto_16k": "https://zenodo.org/records/1227121/files/PRESTO_16k.zip?download=1",
+    "resto_48k": "https://zenodo.org/records/1227121/files/PRESTO_48k.zip?download=1",
+    "station_16k": "https://zenodo.org/records/1227121/files/PSTATION_16k.zip?download=1",
+    "station_48k": "https://zenodo.org/records/1227121/files/PSTATION_48k.zip?download=1",
+
+    "cafe_48k": "https://zenodo.org/records/1227121/files/SCAFE_48k.zip?download=1",
+
+    "square_16k": "https://zenodo.org/records/1227121/files/SPSQUARE_16k.zip?download=1",
+    "square_48k": "https://zenodo.org/records/1227121/files/SPSQUARE_48k.zip?download=1",
+    "traffic_16k": "https://zenodo.org/records/1227121/files/STRAFFIC_16k.zip?download=1",
+    "traffic_48k": "https://zenodo.org/records/1227121/files/STRAFFIC_48k.zip?download=1",
+    "bus_16k": "https://zenodo.org/records/1227121/files/TBUS_16k.zip?download=1",
+    "bus_48k": "https://zenodo.org/records/1227121/files/TBUS_48k.zip?download=1",
+    "car_16k": "https://zenodo.org/records/1227121/files/TCAR_16k.zip?download=1",
+    "car_48k": "https://zenodo.org/records/1227121/files/TCAR_48k.zip?download=1",
+    "metro_16k": "https://zenodo.org/records/1227121/files/TMETRO_16k.zip?download=1",
+    "metro_48k": "https://zenodo.org/records/1227121/files/TMETRO_48k.zip?download=1",
+
+}
+
+_CITATION = """\
+@dataset{DEMAND,
+  author    = {Xing Tian},
+  title     = {DEMAND},
+  month     = jan,
+  year      = 2025,
+  publisher = {Xing Tian},
+  version   = {1.0},
+}
+"""
+
+
+_DESCRIPTION = """DEMAND: Diverse Environments Multichannel Acoustic Noise Database"""
+
+
+_VERSION = datasets.Version("1.0.0")
+
+
+_BUILDER_CONFIGS = [
+    datasets.BuilderConfig(name=key, version=_VERSION, description=key)
+    for key in _DATA_URL_MAP.keys()
+]
+
+
+class Demand(datasets.GeneratorBasedBuilder):
+    VERSION = _VERSION
+
+    BUILDER_CONFIGS = _BUILDER_CONFIGS
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "category": datasets.Value("string"),
+                "audio": datasets.Audio(),
+            }
+        )
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            supervised_keys=None,
+            homepage="",
+            license="",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        data_url = _DATA_URL_MAP.get(self.config.name)
+        if data_url is None:
+            raise AssertionError(f"subset {self.config.name} is not available.")
+
+        archive_path = dl_manager.download_and_extract(data_url)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"archive_path": archive_path, "dl_manager": dl_manager},
+            ),
+        ]
+
+    def _generate_examples(self, archive_path, dl_manager):
+        """Yields examples."""
+        archive_path = Path(archive_path)
+
+        sample_idx = 0
+        for filename in archive_path.glob("**/*.wav"):
+            print(filename)
+            yield sample_idx, {
+                "category": filename.parts[-2],
+                "audio": filename.as_posix(),
+            }
+            sample_idx += 1
+
+
+if __name__ == '__main__':
+    pass
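
The loading script defines one `BuilderConfig` per key in `_DATA_URL_MAP`, so every environment/sample-rate pair is exposed as its own config. A small sketch of enumerating those configs from the local script (config names are the keys in the map above):

```python
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Sketch: enumerate the builder configs exposed by the local demand.py script.
from datasets import load_dataset_builder

builder = load_dataset_builder("demand.py", "kitchen_16k", trust_remote_code=True)
print([config.name for config in builder.BUILDER_CONFIGS])
# ['kitchen_16k', 'kitchen_48k', 'living_16k', ..., 'metro_48k']
```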
main.py ADDED
@@ -0,0 +1,68 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+"""
+### convert to parquet
+
+```text
+Reference:
+https://huggingface.co/docs/datasets/main/en/cli#convert-to-parquet
+
+Run `huggingface-cli login` in a local terminal and enter your token to log in.
+$ huggingface-cli login
+
+After logging in successfully, run:
+datasets-cli convert_to_parquet intelli-zen/demand --trust_remote_code
+
+When it finishes, it automatically opens a discussion in the qgyd2021/demand community.
+You can find your PR to convert the dataset to Parquet at: https://huggingface.co/datasets/qgyd2021/demand/discussions/1
+
+Open that link, find the Merge button, and click it to merge. Shortly afterwards the dataset viewer becomes available.
+
+```
+
+"""
+import argparse
+
+from datasets import load_dataset
+
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--dataset_path",
+        default="demand.py",
+        # default="E:/Users/tianx/HuggingDatasets/demand/demand.py",
+        type=str
+    )
+    parser.add_argument("--dataset_name", default="kitchen_16k", type=str)
+    parser.add_argument(
+        "--dataset_cache_dir",
+        default=(project_path / "hub_datasets").as_posix(),
+        type=str
+    )
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+
+    dataset = load_dataset(
+        path=args.dataset_path,
+        name=args.dataset_name,
+        cache_dir=args.dataset_cache_dir,
+        # streaming=True,
+        trust_remote_code=True,
+    )
+    print(dataset.builder_configs)
+    for sample in dataset["train"]:
+        print(sample)
+        print("-" * 150)
+
+    return
+
+
+if __name__ == '__main__':
+    main()
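
The docstring in main.py walks through the `datasets-cli convert_to_parquet` route for getting Parquet files onto the Hub. One possible alternative, sketched below under assumptions (the target repo id and a prior `huggingface-cli login`), is to load a config locally and push it as Parquet with `push_to_hub`:

```python
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Sketch of an alternative to the convert_to_parquet CLI flow: load one config
# via the local script and push it to the Hub as Parquet. The repo id
# "qgyd2021/demand" is an assumption; requires `huggingface-cli login` first.
from datasets import load_dataset

dataset = load_dataset("demand.py", name="kitchen_16k", trust_remote_code=True)
dataset.push_to_hub("qgyd2021/demand", config_name="kitchen_16k")
```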
project_settings.py ADDED
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import os
+from pathlib import Path
+
+
+project_path = os.path.abspath(os.path.dirname(__file__))
+project_path = Path(project_path)
+
+
+if __name__ == '__main__':
+    pass
requirements.txt ADDED
@@ -0,0 +1,3 @@
+datasets
+soundfile
+librosa