"""Hugging Face `datasets` loading script for the Transcribed Vietnamese Audio dataset."""
import os
import datasets
from huggingface_hub import HfFileSystem
from typing import Iterator, List, Tuple


logger = datasets.logging.get_logger(__name__)
fs = HfFileSystem()


_CITATION = """
"""
_DESCRIPTION = """
This dataset contains transcripts of audio from Vietnamese speakers.
"""
_HOMEPAGE = "https://github.com/tanthinhdt/vietnamese-av-asr"
_MAIN_REPO_PATH = "datasets/phdkhanh2507/transcribed-vietnamese-audio"
_REPO_URL = "https://huggingface.co/{}/resolve/main"
_URLS = {
    # Per-config metadata parquet files, resolved against the main dataset repo.
    "meta": _REPO_URL.format(_MAIN_REPO_PATH) + "/metadata/{id}.parquet",
}
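
# For illustration, a hypothetical config named "example" would be fetched from:
# https://huggingface.co/datasets/phdkhanh2507/transcribed-vietnamese-audio/resolve/main/metadata/example.parquet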

# Every parquet file under metadata/ in the main repo defines one configuration,
# named after the file stem; "all" selects every discovered configuration.
_CONFIGS = ["all"]
if fs.exists(_MAIN_REPO_PATH + "/metadata"):
    _CONFIGS.extend([
        os.path.basename(file_name)[:-len(".parquet")]
        for file_name in fs.listdir(_MAIN_REPO_PATH + "/metadata", detail=False)
        if file_name.endswith(".parquet")
    ])


class TranscribedVietnameseAudioConfig(datasets.BuilderConfig):
    """Transcribed Vietnamese Audio configuration."""

    def __init__(self, name, **kwargs):
        """
        :param name: Name of subset.
        :param kwargs: Arguments.
        """
        super().__init__(
            name=name,
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
            **kwargs,
        )


class TranscribedVietnameseAudio(datasets.GeneratorBasedBuilder):
    """Transcribed Vietnamese Audio dataset."""

    # One builder config per metadata shard, plus "all" as the default.
    BUILDER_CONFIGS = [TranscribedVietnameseAudioConfig(name) for name in _CONFIGS]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self) -> datasets.DatasetInfo:
        features = datasets.Features({
            "id": datasets.Value("string"),
            "chunk_id": datasets.Value("string"),
            "video_fps": datasets.Value("int8"),
            "audio_fps": datasets.Value("int64"),
            "transcript": datasets.Value("string"),
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """
        Get splits.
        :param dl_manager: Download manager.
        :return: Splits.
        """
        config_names = _CONFIGS[1:] if self.config.name == "all" else [self.config.name]

        metadata_paths = dl_manager.download(
            [_URLS["meta"].format(id=id) for id in config_names]
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "metadata_paths": metadata_paths,
                },
            ),
        ]
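
    # Note: only a single TRAIN split is exposed; _generate_examples below
    # concatenates the rows of every selected metadata shard into it.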

    def _generate_examples(
        self, metadata_paths: List[str],
    ) -> Iterator[Tuple[int, dict]]:
        """
        Generate examples from metadata.
        :param metadata_paths: Paths to metadata.
        :yield: Example.
        """
        # Read all downloaded metadata parquet files as a single "train" table.
        dataset = datasets.load_dataset(
            "parquet",
            data_files=metadata_paths,
            split="train",
        )
        for i, sample in enumerate(dataset):
            yield i, {
                "id": sample["id"],
                "chunk_id": sample["chunk_id"],
                "video_fps": sample["video_fps"],
                "audio_fps": sample["audio_fps"],
                "transcript": sample["transcript"],
            }

    def __get_binary_data(self, path: str) -> bytes:
        """
        Get binary data from path.
        :param path: Path to file.
        :return: Binary data.
        """
        with open(path, "rb") as f:
            return f.read()

    def __get_text_data(self, path: str) -> str:
        """
        Get transcript from path.
        :param path: Path to transcript.
        :return: Transcript.
        """
        with open(path, "r", encoding="utf-8") as f:
            return f.read().strip()
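

# Usage sketch (assumption: this file is the loading script hosted in the
# phdkhanh2507/transcribed-vietnamese-audio dataset repo, so the dataset can be
# loaded by repo id; newer versions of `datasets` may additionally require
# trust_remote_code=True for script-based datasets):
#
#     import datasets
#
#     ds = datasets.load_dataset(
#         "phdkhanh2507/transcribed-vietnamese-audio",
#         name="all",
#         split="train",
#     )
#     print(ds[0]["transcript"])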