# coding=utf-8
# Lint as: python3
"""test set"""
import os

import datasets
_CITATION = """\
@inproceedings{panayotov2015librispeech,
title={Librispeech: an ASR corpus based on public domain audio books},
author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
pages={5206--5210},
year={2015},
organization={IEEE}
}
"""
_DESCRIPTION = """\
Lorem ipsum
"""
_URL = "https://huggingface.co/datasets/j-krzywdziak/test2"
_AUDIO_URL = "https://huggingface.co/datasets/j-krzywdziak/test2/resolve/main"
_DATA_URL = "https://huggingface.co/datasets/j-krzywdziak/test2/raw/main"
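# Note: on the Hugging Face Hub, "resolve/main" URLs serve the actual file
# contents (including LFS-backed binaries such as the audio archives), while
# "raw/main" serves the plain git blob, which suffices for small TSV files.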
_DL_URLS = {
"clean": {
"of": _AUDIO_URL + "/clean/of/examples.zip",
"on": _AUDIO_URL + "/clean/on/examples.zip",
"example": _DATA_URL + "/clean/example.tsv",
"keyword": _DATA_URL + "/clean/keyword.tsv"
},
"other": {
"of": _AUDIO_URL + "/other/of/examples.zip",
"on": _AUDIO_URL + "/other/on/examples.zip",
"example": _DATA_URL + "/other/example.tsv",
"keyword": _DATA_URL + "/other/keyword.tsv"
},
"all": {
"clean.of": _AUDIO_URL + "/clean/of/examples.zip",
"clean.on": _AUDIO_URL + "/clean/on/examples.zip",
"other.of": _AUDIO_URL + "/other/of/examples.zip",
"other.on": _AUDIO_URL + "/other/on/examples.zip",
"clean.example": _DATA_URL + "/clean/example.tsv",
"clean.keyword": _DATA_URL + "/clean/keyword.tsv",
"other.example": _DATA_URL + "/other/example.tsv",
"other.keyword": _DATA_URL + "/other/keyword.tsv"
},
}
class TestASR(datasets.GeneratorBasedBuilder):
"""Lorem ipsum."""
    VERSION = datasets.Version("0.0.0")
DEFAULT_CONFIG_NAME = "all"
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="clean", description="'Clean' speech."),
datasets.BuilderConfig(name="other", description="'Other', more challenging, speech."),
datasets.BuilderConfig(name="all", description="Combined clean and other dataset."),
]
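    # Each config name doubles as a key into _DL_URLS, since _split_generators
    # looks up its download URLs via _DL_URLS[self.config.name].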
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"path": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=16_000),
"ngram": datasets.Value("string"),
"type": datasets.Value("string")
}
),
            supervised_keys=None,  # no ("file", "text") columns exist in the features above
homepage=_URL,
citation=_CITATION
)
def _split_generators(self, dl_manager):
archive_path = dl_manager.download(_DL_URLS[self.config.name])
# (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
        if self.config.name in ("clean", "other"):
            # The "clean" and "other" configs share the same archive layout,
            # so a single branch covers both.
            of_split = [
                datasets.SplitGenerator(
                    name="of",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("of"),
                        "files": dl_manager.iter_archive(archive_path["of"]),
                        "examples": archive_path["example"],
                        "keywords": archive_path["keyword"],
                    },
                )
            ]
            on_split = [
                datasets.SplitGenerator(
                    name="on",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("on"),
                        "files": dl_manager.iter_archive(archive_path["on"]),
                        "examples": archive_path["example"],
                        "keywords": archive_path["keyword"],
                    },
                )
            ]
elif self.config.name == "all":
of_split = [
datasets.SplitGenerator(
name="clean.of",
gen_kwargs={
"local_extracted_archive": local_extracted_archive.get("clean.of"),
"files": dl_manager.iter_archive(archive_path["clean.of"]),
"examples": archive_path["clean.example"],
"keywords": archive_path["clean.keyword"]
},
),
datasets.SplitGenerator(
name="other.of",
gen_kwargs={
"local_extracted_archive": local_extracted_archive.get("other.of"),
"files": dl_manager.iter_archive(archive_path["other.of"]),
"examples": archive_path["other.example"],
"keywords": archive_path["other.keyword"]
}
)
]
on_split = [
datasets.SplitGenerator(
name="clean.on",
gen_kwargs={
"local_extracted_archive": local_extracted_archive.get("clean.on"),
"files": dl_manager.iter_archive(archive_path["clean.on"]),
"examples": archive_path["clean.example"],
"keywords": archive_path["clean.keyword"]
},
),
datasets.SplitGenerator(
name="other.on",
gen_kwargs={
"local_extracted_archive": local_extracted_archive.get("other.on"),
"files": dl_manager.iter_archive(archive_path["other.on"]),
"examples": archive_path["other.example"],
"keywords": archive_path["other.keyword"]
}
)
]
return on_split + of_split
    def _generate_examples(self, files, local_extracted_archive, examples, keywords):
        """Yields (key, example) pairs, pairing archived audio bytes with TSV transcripts."""
        audio_data = {}
        transcripts = []
        key = 0
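        # Buffer every audio file's bytes first: iter_archive yields archive
        # members strictly in sequence (also in streaming mode), so all of
        # them must be read before they can be matched to transcript rows.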
for path, f in files:
audio_data[path] = f.read()
        with open(keywords, encoding="utf-8") as f:
            next(f)  # keyword.tsv carries a header row; example.tsv does not
            for row in f:
                r = row.rstrip("\n").split("\t")
                path = "examples/" + r[0]
                ngram = r[1]
                transcripts.append({
                    "path": path,
                    "ngram": ngram,
                    "type": "keyword"
                })
        with open(examples, encoding="utf-8") as f2:
            for row in f2:
                r = row.rstrip("\n").split("\t")
                path = "examples/" + r[0]
                ngram = r[1]
                transcripts.append({
                    "path": path,
                    "ngram": ngram,
                    "type": "example"
                })
        # Yield only when every buffered audio file has a matching transcript.
        if audio_data and len(audio_data) == len(transcripts):
            for transcript in transcripts:
                rel_path = transcript["path"]
                if local_extracted_archive:
                    # In non-streaming mode, expose the extracted local file path.
                    transcript["path"] = os.path.join(local_extracted_archive, rel_path)
                audio = {"path": transcript["path"], "bytes": audio_data[rel_path]}
                yield key, {"audio": audio, **transcript}
                key += 1
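
# A minimal usage sketch (not part of the loading script proper), assuming the
# data files live alongside this script in the j-krzywdziak/test2 Hub repo.
if __name__ == "__main__":
    # trust_remote_code=True is required by recent `datasets` releases to run
    # script-based builders like this one.
    ds = datasets.load_dataset("j-krzywdziak/test2", "clean", trust_remote_code=True)
    print(ds)            # DatasetDict with the custom "of" and "on" splits
    print(ds["of"][0])   # {"path": ..., "audio": {...}, "ngram": ..., "type": ...}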