"""Adversarial examples based on the Librispeech automatic speech recognition dataset.""" |
|
|
|
from __future__ import absolute_import, division, print_function |
|
|
|
import glob |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
|
|
_CITATION = """\
@inproceedings{panayotov2015librispeech,
  title={Librispeech: an ASR corpus based on public domain audio books},
  author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
  booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
  pages={5206--5210},
  year={2015},
  organization={IEEE}
}
"""

_DESCRIPTION = """\
LibriSpeech is a corpus of approximately 1000 hours of read English speech with a sampling rate of 16 kHz,
prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
audiobooks from the LibriVox project, and has been carefully segmented and aligned.
Note that in order to limit the required storage for preparing this dataset, the audio
is stored in the .wav format and is not converted to a float32 array. To convert an audio
file to a float32 array, please make use of the `.map()` function as follows:
```python
import soundfile as sf

def map_to_array(batch):
    speech_array, _ = sf.read(batch["file"])
    batch["speech"] = speech_array
    return batch

dataset = dataset.map(map_to_array, remove_columns=["file"])
```
"""
_DL_URL = "https://drive.google.com/file/d/1oaBhaHlY4TD2JcvenR-6OZNIsyPG8OGN/view?usp=sharing"


class LibrispeechASRConfig(datasets.BuilderConfig):
    """BuilderConfig for LibriSpeechASR."""

    def __init__(self, **kwargs):
        """
        Args:
            data_dir: `string`, the path to the folder containing the files in the
                downloaded .tar
            citation: `string`, citation for the data set
            url: `string`, url for information about the data set
            **kwargs: keyword arguments forwarded to super.
        """
        super(LibrispeechASRConfig, self).__init__(
            version=datasets.Version("2.1.0", ""), **kwargs)


class LibrispeechASR(datasets.GeneratorBasedBuilder):
    """Librispeech dataset."""

    BUILDER_CONFIGS = [
        LibrispeechASRConfig(name="adv", description="'Adversarial' speech."),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    # Declared so that the examples yielded in _generate_examples
                    # (which include speaker and chapter ids) match the schema.
                    "speaker_id": datasets.Value("int64"),
                    "chapter_id": datasets.Value("int64"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            homepage=_DL_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # download_and_extract is given a dict so that the extracted path can be
        # looked up by split key below.
        archive_path = dl_manager.download_and_extract({"dev": _DL_URL})
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "archive_path": archive_path["dev"],
                    "split_name": f"dev_{self.config.name}",
                },
            ),
        ]

    def _generate_examples(self, archive_path, split_name):
        """Generate examples from a Librispeech archive_path."""
        transcripts_glob = os.path.join(archive_path, "**/*.txt")
        # The split name encodes which transcript to use: splits ending in "adv"
        # use the adversarial transcript, splits ending in "nat" the original one.
        # The audio files are assumed to live in a subfolder named after the split.
        if split_name.endswith("adv"):
            use_adv_transcript = True
        else:
            assert split_name.endswith("nat"), f"Unexpected split name: {split_name}"
            use_adv_transcript = False
        split_folder = split_name

        for transcript_file in glob.glob(transcripts_glob, recursive=True):
            path = os.path.dirname(transcript_file)
            audio_path = os.path.join(path, split_folder)
            with open(transcript_file) as f:
                for line in f:
                    # Each line is expected to be:
                    # <utterance id>,<original transcript>,<adversarial transcript>
                    line = line.strip()
                    key, og_transcript, adv_transcript = line.split(",", 2)
                    transcript = adv_transcript if use_adv_transcript else og_transcript
                    suffix = "adv" if use_adv_transcript else "nat"
                    audio_file = f"{key}_{suffix}.wav"
                    speaker_id, chapter_id = [int(el) for el in key.split("-")[:2]]
                    split_key = f"{key}_{suffix}_{split_name}"
                    example = {
                        "id": split_key,
                        "speaker_id": speaker_id,
                        "chapter_id": chapter_id,
                        "file": os.path.join(audio_path, audio_file),
                        "audio": os.path.join(audio_path, audio_file),
                        "text": transcript,
                    }
                    yield split_key, example
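

# ---------------------------------------------------------------------------
# Minimal usage sketch, kept out of the builder itself. It assumes this file is
# saved locally as "librispeech_adv.py"; that filename is illustrative and not
# defined anywhere above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from datasets import load_dataset

    # "adv" is the only config defined above; the builder only produces a
    # validation split.
    ds = load_dataset("librispeech_adv.py", "adv", split="validation")

    # The Audio feature decodes on access: "array" holds the waveform at the
    # 16 kHz sampling rate declared in _info().
    sample = ds[0]
    print(sample["id"], sample["text"])
    print(sample["audio"]["sampling_rate"], len(sample["audio"]["array"]))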