phdkhanh2507 committed
Commit 71a501c (verified) · 1 Parent(s): 9fa8598

Update transcribed-vietnamese-audio.py

Files changed (1)
  1. transcribed-vietnamese-audio.py +180 -180
transcribed-vietnamese-audio.py CHANGED
@@ -1,180 +1,180 @@
- import os
- import datasets
- from huggingface_hub import HfFileSystem
- from typing import List, Tuple
-
-
- logger = datasets.logging.get_logger(__name__)
- fs = HfFileSystem()
-
-
- _CITATION = """
-
- """
- _DESCRIPTION = """
- This dataset contains transcripts from audio of Vietnamese speakers.
- """
- _HOMEPAGE = "https://github.com/duytran1332002/vlr"
- _MAIN_REPO_PATH = "datasets/phdkhanh2507/transcribed-vietnamese-audio"
- _VISUAL_REPO_PATH = "datasets/phdkhanh2507/vietnamese-speaker-clip"
- _REPO_URL = "https://huggingface.co/{}/resolve/main"
- _URLS = {
-     "meta": f"{_REPO_URL}/metadata/".format(_MAIN_REPO_PATH) + "{channel}.parquet",
-     "visual": f"{_REPO_URL}/visual/".format(_VISUAL_REPO_PATH) + "{channel}.zip",
-     "audio": f"{_REPO_URL}/audio/".format(_MAIN_REPO_PATH) + "{channel}.zip",
-     "transcript": f"{_REPO_URL}/transcript/".format(_MAIN_REPO_PATH) + "{channel}.zip",
- }
- _CONFIGS = ["all"]
- if fs.exists(_MAIN_REPO_PATH + "/metadata"):
-     _CONFIGS.extend([
-         os.path.basename(file_name)[:-8]
-         for file_name in fs.listdir(_MAIN_REPO_PATH + "/metadata", detail=False)
-         if file_name.endswith(".parquet")
-     ])
-
-
- class TranscribedVietnameseAudioConfig(datasets.BuilderConfig):
-     """Transcribed Vietnamese Audio configuration."""
-
-     def __init__(self, name, **kwargs):
-         """
-         :param name: Name of subset.
-         :param kwargs: Arguments.
-         """
-         super().__init__(
-             name=name,
-             version=datasets.Version("1.0.0"),
-             description=_DESCRIPTION,
-             **kwargs,
-         )
-
-
- class TranscribedVietnameseAudio(datasets.GeneratorBasedBuilder):
-     """Transcribed Vietnamese Audio dataset."""
-
-     BUILDER_CONFIGS = [TranscribedVietnameseAudioConfig(name) for name in _CONFIGS]
-     DEFAULT_CONFIG_NAME = "all"
-
-     def _info(self) -> datasets.DatasetInfo:
-         features = datasets.Features({
-             "id": datasets.Value("string"),
-             "channel": datasets.Value("string"),
-             "visual": datasets.Value("string"),
-             "duration": datasets.Value("float64"),
-             "fps": datasets.Value("int8"),
-             "audio": datasets.Value("binary"),
-             "sampling_rate": datasets.Value("int64"),
-             "transcript": datasets.Value("string"),
-         })
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(
-         self, dl_manager: datasets.DownloadManager
-     ) -> List[datasets.SplitGenerator]:
-         """
-         Get splits.
-         :param dl_manager: Download manager.
-         :return: Splits.
-         """
-         config_names = _CONFIGS[1:] if self.config.name == "all" else [self.config.name]
-
-         metadata_paths = dl_manager.download(
-             [_URLS["meta"].format(channel=channel) for channel in config_names]
-         )
-         visual_dirs = dl_manager.download_and_extract(
-             [_URLS["visual"].format(channel=channel) for channel in config_names]
-         )
-         audio_dirs = dl_manager.download_and_extract(
-             [_URLS["audio"].format(channel=channel) for channel in config_names]
-         )
-         transcript_dirs = dl_manager.download_and_extract(
-             [_URLS["transcript"].format(channel=channel) for channel in config_names]
-         )
-
-         visual_dict = {
-             channel: visual_dir for channel, visual_dir in zip(config_names, visual_dirs)
-         }
-         audio_dict = {
-             channel: audio_dir for channel, audio_dir in zip(config_names, audio_dirs)
-         }
-         transcript_dict = {
-             channel: transcript_dir
-             for channel, transcript_dir in zip(config_names, transcript_dirs)
-         }
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "metadata_paths": metadata_paths,
-                     "visual_dict": visual_dict,
-                     "audio_dict": audio_dict,
-                     "transcript_dict": transcript_dict,
-                 },
-             ),
-         ]
-
-     def _generate_examples(
-         self, metadata_paths: List[str],
-         visual_dict: dict,
-         audio_dict: dict,
-         transcript_dict: dict,
-     ) -> Tuple[int, dict]:
-         """
-         Generate examples from metadata.
-         :param metadata_paths: Paths to metadata.
-         :param audio_dict: Paths to directory containing audios.
-         :param transcript_dict: Paths to directory containing transcripts.
-         :yield: Example.
-         """
-         dataset = datasets.load_dataset(
-             "parquet",
-             data_files=metadata_paths,
-             split="train",
-         )
-         for i, sample in enumerate(dataset):
-             channel = sample["channel"]
-             visual_path = os.path.join(
-                 visual_dict[channel], channel, sample["id"] + ".mp4"
-             )
-             audio_path = os.path.join(
-                 audio_dict[channel], channel, sample["id"] + ".wav"
-             )
-             transcript_path = os.path.join(
-                 transcript_dict[channel], channel, sample["id"] + ".txt"
-             )
-
-             yield i, {
-                 "id": sample["id"],
-                 "channel": channel,
-                 "visual": visual_path,
-                 "duration": sample["duration"],
-                 "fps": sample["fps"],
-                 "audio": self.__get_binary_data(audio_path),
-                 "sampling_rate": sample["sampling_rate"],
-                 "transcript": self.__get_text_data(transcript_path),
-             }
-
-     def __get_binary_data(self, path: str) -> bytes:
-         """
-         Get binary data from path.
-         :param path: Path to file.
-         :return: Binary data.
-         """
-         with open(path, "rb") as f:
-             return f.read()
-
-     def __get_text_data(self, path: str) -> str:
-         """
-         Get transcript from path.
-         :param path: Path to transcript.
-         :return: Transcript.
-         """
-         with open(path, "r", encoding="utf-8") as f:
-             return f.read().strip()
 
+ import os
+ import datasets
+ from huggingface_hub import HfFileSystem
+ from typing import List, Tuple
+
+
+ logger = datasets.logging.get_logger(__name__)
+ fs = HfFileSystem()
+
+
+ _CITATION = """
+
+ """
+ _DESCRIPTION = """
+ This dataset contains transcripts from audio of Vietnamese speakers.
+ """
+ _HOMEPAGE = "https://github.com/duytran1332002/vlr"
+ _MAIN_REPO_PATH = "datasets/phdkhanh2507/transcribed-vietnamese-audio"
+ _VISUAL_REPO_PATH = "datasets/phdkhanh2507/vietnamese-speaker-clip"
+ _REPO_URL = "https://huggingface.co/{}/resolve/main"
+ _URLS = {
+     "meta": f"{_REPO_URL}/metadata/".format(_MAIN_REPO_PATH) + "{channel}.parquet",
+     "visual": f"{_REPO_URL}/visual/".format(_VISUAL_REPO_PATH) + "{channel}.zip",
+     "transcript": f"{_REPO_URL}/transcript/".format(_MAIN_REPO_PATH) + "{channel}.zip",
+ }
+
+ _CONFIGS = ["all"]
+ if fs.exists(_MAIN_REPO_PATH + "/metadata"):
+     _CONFIGS.extend([
+         os.path.basename(file_name)[:-8]
+         for file_name in fs.listdir(_MAIN_REPO_PATH + "/metadata", detail=False)
+         if file_name.endswith(".parquet")
+     ])
+
+
+ class TranscribedVietnameseAudioConfig(datasets.BuilderConfig):
+     """Transcribed Vietnamese Audio configuration."""
+
+     def __init__(self, name, **kwargs):
+         """
+         :param name: Name of subset.
+         :param kwargs: Arguments.
+         """
+         super().__init__(
+             name=name,
+             version=datasets.Version("1.0.0"),
+             description=_DESCRIPTION,
+             **kwargs,
+         )
+
+
+ class TranscribedVietnameseAudio(datasets.GeneratorBasedBuilder):
+     """Transcribed Vietnamese Audio dataset."""
+
+     BUILDER_CONFIGS = [TranscribedVietnameseAudioConfig(name) for name in _CONFIGS]
+     DEFAULT_CONFIG_NAME = "all"
+
+     def _info(self) -> datasets.DatasetInfo:
+         features = datasets.Features({
+             "id": datasets.Value("string"),
+             "channel": datasets.Value("string"),
+             "visual": datasets.Value("string"),
+             "duration": datasets.Value("float64"),
+             "fps": datasets.Value("int8"),
+             "audio": datasets.Value("binary"),
+             "sampling_rate": datasets.Value("int64"),
+             "transcript": datasets.Value("string"),
+         })
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(
+         self, dl_manager: datasets.DownloadManager
+     ) -> List[datasets.SplitGenerator]:
+         """
+         Get splits.
+         :param dl_manager: Download manager.
+         :return: Splits.
+         """
+         config_names = _CONFIGS[1:] if self.config.name == "all" else [self.config.name]
+
+         metadata_paths = dl_manager.download(
+             [_URLS["meta"].format(channel=channel) for channel in config_names]
+         )
+         visual_dirs = dl_manager.download_and_extract(
+             [_URLS["visual"].format(channel=channel) for channel in config_names]
+         )
+         audio_dirs = dl_manager.download_and_extract(
+             [_URLS["audio"].format(channel=channel) for channel in config_names]
+         )
+         transcript_dirs = dl_manager.download_and_extract(
+             [_URLS["transcript"].format(channel=channel) for channel in config_names]
+         )
+
+         visual_dict = {
+             channel: visual_dir for channel, visual_dir in zip(config_names, visual_dirs)
+         }
+         audio_dict = {
+             channel: audio_dir for channel, audio_dir in zip(config_names, audio_dirs)
+         }
+         transcript_dict = {
+             channel: transcript_dir
+             for channel, transcript_dir in zip(config_names, transcript_dirs)
+         }
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "metadata_paths": metadata_paths,
+                     "visual_dict": visual_dict,
+                     "audio_dict": audio_dict,
+                     "transcript_dict": transcript_dict,
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self, metadata_paths: List[str],
+         visual_dict: dict,
+         audio_dict: dict,
+         transcript_dict: dict,
+     ) -> Tuple[int, dict]:
+         """
+         Generate examples from metadata.
+         :param metadata_paths: Paths to metadata.
+         :param audio_dict: Paths to directory containing audios.
+         :param transcript_dict: Paths to directory containing transcripts.
+         :yield: Example.
+         """
+         dataset = datasets.load_dataset(
+             "parquet",
+             data_files=metadata_paths,
+             split="train",
+         )
+         for i, sample in enumerate(dataset):
+             channel = sample["channel"]
+             visual_path = os.path.join(
+                 visual_dict[channel], channel, sample["id"] + ".mp4"
+             )
+             audio_path = os.path.join(
+                 audio_dict[channel], channel, sample["id"] + ".wav"
+             )
+             transcript_path = os.path.join(
+                 transcript_dict[channel], channel, sample["id"] + ".txt"
+             )
+
+             yield i, {
+                 "id": sample["id"],
+                 "channel": channel,
+                 "visual": visual_path,
+                 "duration": sample["duration"],
+                 "fps": sample["fps"],
+                 "audio": self.__get_binary_data(audio_path),
+                 "sampling_rate": sample["sampling_rate"],
+                 "transcript": self.__get_text_data(transcript_path),
+             }
+
+     def __get_binary_data(self, path: str) -> bytes:
+         """
+         Get binary data from path.
+         :param path: Path to file.
+         :return: Binary data.
+         """
+         with open(path, "rb") as f:
+             return f.read()
+
+     def __get_text_data(self, path: str) -> str:
+         """
+         Get transcript from path.
+         :param path: Path to transcript.
+         :return: Transcript.
+         """
+         with open(path, "r", encoding="utf-8") as f:
+             return f.read().strip()
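
For context, a minimal sketch of how a script-based dataset like this one is typically consumed with the Hugging Face datasets library; the config name "all", the split name, and the trust_remote_code flag reflect the script's defaults and current datasets behavior, and none of it is part of the commit itself.

# Minimal usage sketch (assumption: recent `datasets` releases require
# trust_remote_code=True to execute a repo-hosted loading script like this one).
from datasets import load_dataset

ds = load_dataset(
    "phdkhanh2507/transcribed-vietnamese-audio",
    name="all",              # default config; per-channel configs are discovered from metadata/*.parquet
    split="train",           # the script defines only a TRAIN split
    trust_remote_code=True,
)

sample = ds[0]
print(sample["id"], sample["channel"], sample["sampling_rate"])
print(sample["transcript"])
# sample["audio"] holds raw WAV bytes; decode with soundfile or librosa if needed.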