phdkhanh2507 committed
Commit 569d9f2 · verified · Parent(s): e3ba429

Delete vietnamese-speaker-lip-clip-v1.py

Files changed (1):
1. vietnamese-speaker-lip-clip-v1.py (+0 -145)
vietnamese-speaker-lip-clip-v1.py DELETED
@@ -1,145 +0,0 @@
- import os
- import datasets
- from huggingface_hub import HfFileSystem
- from typing import List, Tuple
-
-
- logger = datasets.logging.get_logger(__name__)
- fs = HfFileSystem()
-
-
- _CITATION = """
-
- """
- _DESCRIPTION = """
- This dataset extracts the mouth region from short clips of Vietnamese speakers.
- """
- _HOMEPAGE = "https://github.com/tanthinhdt/vietnamese-av-asr"
- _MAIN_REPO_PATH = "datasets/phdkhanh2507/vietnamese-speaker-lip-clip"
- _REPO_URL = "https://huggingface.co/{}/resolve/main"
- _URLS = {
-     "meta": f"{_REPO_URL}/metadata/".format(_MAIN_REPO_PATH) + "{channel}.parquet",
-     "visual": f"{_REPO_URL}/visual/".format(_MAIN_REPO_PATH) + "{channel}.zip",
- }
- _CONFIGS = ["all"]
- if fs.exists(_MAIN_REPO_PATH + "/metadata"):
-     _CONFIGS.extend([
-         os.path.basename(file_name)[:-8]
-         for file_name in fs.listdir(_MAIN_REPO_PATH + "/metadata", detail=False)
-         if file_name.endswith(".parquet")
-     ])
-
-
- class VietnameseSpeakerLipClipConfig(datasets.BuilderConfig):
-     """Vietnamese Speaker Clip configuration."""
-
-     def __init__(self, name, **kwargs):
-         """
-         :param name: Name of subset.
-         :param kwargs: Arguments.
-         """
-         super(VietnameseSpeakerLipClipConfig, self).__init__(
-             name=name,
-             version=datasets.Version("1.0.0"),
-             description=_DESCRIPTION,
-             **kwargs,
-         )
-
-
- class VietnameseSpeakerLipClip(datasets.GeneratorBasedBuilder):
-     """Vietnamese Speaker Clip dataset."""
-
-     BUILDER_CONFIGS = [VietnameseSpeakerLipClipConfig(name) for name in _CONFIGS]
-     DEFAULT_CONFIG_NAME = "all"
-
-     def _info(self) -> datasets.DatasetInfo:
-         features = datasets.Features({
-             "id": datasets.Value("string"),
-             "channel": datasets.Value("string"),
-             "visual": datasets.Value("string"),
-             "duration": datasets.Value("float64"),
-             "fps": datasets.Value("int8"),
-             "audio": datasets.Value("string"),
-             "sampling_rate": datasets.Value("int64"),
-         })
-
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(
-         self, dl_manager: datasets.DownloadManager
-     ) -> List[datasets.SplitGenerator]:
-         """
-         Get splits.
-         :param dl_manager: Download manager.
-         :return: Splits.
-         """
-         config_names = _CONFIGS[1:] if self.config.name == "all" else [self.config.name]
-
-         metadata_paths = dl_manager.download(
-             [_URLS["meta"].format(channel=channel) for channel in config_names]
-         )
-         visual_dirs = dl_manager.download_and_extract(
-             [_URLS["visual"].format(channel=channel) for channel in config_names]
-         )
-         # audio_dirs = dl_manager.download_and_extract(
-         #     [_URLS["audio"].format(channel=channel) for channel in config_names]
-         # )
-
-         visual_dict = {
-             channel: visual_dir for channel, visual_dir in zip(config_names, visual_dirs)
-         }
-         # audio_dict = {
-         #     channel: audio_dir for channel, audio_dir in zip(config_names, audio_dirs)
-         # }
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "metadata_paths": metadata_paths,
-                     "visual_dict": visual_dict,
-                     # "audio_dict": audio_dict,
-                 },
-             ),
-         ]
-
-     def _generate_examples(
-         self, metadata_paths: List[str],
-         visual_dict: dict,
-         audio_dict: dict,
-     ) -> Tuple[int, dict]:
-         """
-         Generate examples from metadata.
-         :param metadata_paths: Paths to metadata.
-         :param visual_dict: Paths to directory containing videos.
-         :param audio_dict: Paths to directory containing audios.
-         :yield: Example.
-         """
-         dataset = datasets.load_dataset(
-             "parquet",
-             data_files=metadata_paths,
-             split="train",
-         )
-         for i, sample in enumerate(dataset):
-             channel = sample["channel"]
-             visual_path = os.path.join(
-                 visual_dict[channel], channel, sample["id"] + ".mp4"
-             )
-             # audio_path = os.path.join(
-             #     audio_dict[channel], channel, sample["id"] + ".wav"
-             # )
-
-             yield i, {
-                 "id": sample["id"],
-                 "channel": channel,
-                 "visual": visual_path,
-                 "duration": sample["duration"],
-                 "fps": sample["fps"],
-                 # "audio": audio_path,
-                 "sampling_rate": sample["sampling_rate"],
-             }
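
As written, the deleted v1 script is internally inconsistent: _split_generators comments "audio_dict" out of its gen_kwargs, while _generate_examples still declares audio_dict as a required parameter, so invoking the generator with the provided gen_kwargs would raise a TypeError, and the "audio" feature declared in _info is never yielded. The sketch below is only an illustration of how the two methods could be made consistent by dropping the audio fields; it is not taken from the repository, and it reuses the script's own module-level imports (os, datasets) and typing aliases (List, Tuple). A matching _info would also need to drop the unused "audio" feature.

def _generate_examples(
    self, metadata_paths: List[str],
    visual_dict: dict,
) -> Tuple[int, dict]:
    """Yield one example per row of the downloaded per-channel metadata parquet files."""
    dataset = datasets.load_dataset(
        "parquet",
        data_files=metadata_paths,
        split="train",
    )
    for i, sample in enumerate(dataset):
        channel = sample["channel"]
        # Each channel archive extracts to <extract_dir>/<channel>/<id>.mp4.
        visual_path = os.path.join(
            visual_dict[channel], channel, sample["id"] + ".mp4"
        )
        yield i, {
            "id": sample["id"],
            "channel": channel,
            "visual": visual_path,
            "duration": sample["duration"],
            "fps": sample["fps"],
            "sampling_rate": sample["sampling_rate"],
        }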
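
For reference, a loading script like this one is consumed through datasets.load_dataset. The snippet below is a hypothetical usage sketch, not taken from the repository: it assumes a script equivalent to the deleted v1 file is still served from the dataset repo and that remote code execution is enabled. The repo id comes from _MAIN_REPO_PATH, the "all" config from DEFAULT_CONFIG_NAME, and the single TRAIN split from _split_generators.

from datasets import load_dataset

clips = load_dataset(
    "phdkhanh2507/vietnamese-speaker-lip-clip",  # _MAIN_REPO_PATH without the "datasets/" prefix
    name="all",              # default config; per-channel configs are discovered from metadata/*.parquet
    split="train",           # the script defines only a TRAIN split
    trust_remote_code=True,  # required to execute a repo-hosted loading script
)
print(clips[0]["visual"])    # local path of the extracted mouth-region clip (.mp4)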