Datasets:

ArXiv:
License:
evi / evi.py
polinaeterna's picture
polinaeterna HF staff
update
00846d9
raw
history blame
5.62 kB
# coding=utf-8
# Copyright 2022 The PolyAI and HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EVI is a challenging spoken multilingual dataset with 5,506 dialogues in English, Polish, and French
that can be used for benchmarking and developing knowledge-based enrolment, identification, and identification
for spoken dialogue systems.
"""
import csv
import json
import os
from pathlib import Path
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{Spithourakis2022evi,
author = {Georgios P. Spithourakis and Ivan Vuli\'{c} and Micha\l{} Lis and I\~{n}igo Casanueva and Pawe\l{} Budzianowski},
title = {{EVI}: Multilingual Spoken Dialogue Tasks and Dataset for Knowledge-Based Enrolment, Verification, and Identification},
year = {2022},
note = {Data available at https://github.com/PolyAI-LDN/evi-paper},
url = {https://arxiv.org/abs/2204.13496},
booktitle = {Findings of NAACL (publication pending)}
}
""" # noqa
_ALL_CONFIGS = sorted([
"en-GB", "fr-FR", "pl-PL"
])
_LANGS = sorted(["en", "fr", "pl"])
_DESCRIPTION = """
EVI is a challenging spoken multilingual dataset with 5,506 dialogues in English, Polish, and French
that can be used for benchmarking and developing knowledge-based enrolment, identification, and identification
for spoken dialogue systems.
""" # noqa
_LICENSE = "CC-BY-4.0"
_HOMEPAGE = "https://github.com/PolyAI-LDN/evi-paper"
_BASE_URL = "https://huggingface.co/datasets/PolyAI/evi/resolve/main/data"
_TEXT_URL = {
lang: os.path.join(_BASE_URL, f"dialogues.{lang.split('-')[0]}.tsv") for lang in _LANGS
}
_AUDIO_DATA_URL = "https://poly-public-data.s3.eu-west-2.amazonaws.com/evi-paper/audios.zip" # noqa
_VERSION = datasets.Version("0.0.1", "")
class EviConfig(datasets.BuilderConfig):
    """BuilderConfig for the EVI dataset.

    ``name`` is either one of the locale codes in ``_ALL_CONFIGS``
    (e.g. "en-GB") or "all" to select every language at once.
    """

    def __init__(self, name, *args, **kwargs):
        super().__init__(name=name, *args, **kwargs)
        # "all" keeps every language; otherwise keep only the language
        # portion of the locale code (e.g. "en-GB" -> "en").
        if name == "all":
            self.languages = _LANGS
        else:
            self.languages = [name.split("-")[0]]
class Evi(datasets.GeneratorBasedBuilder):
    """Dataset builder for the EVI spoken multilingual dialogue corpus."""

    DEFAULT_WRITER_BATCH_SIZE = 512
    BUILDER_CONFIGS = [EviConfig(name) for name in _ALL_CONFIGS + ["all"]]

    def _info(self):
        """Return dataset metadata together with the feature schema."""
        return datasets.DatasetInfo(
            version=_VERSION,
            description=_DESCRIPTION,
            license=_LICENSE,
            citation=_CITATION,
            features=datasets.Features(
                {
                    "language": datasets.ClassLabel(names=_LANGS),
                    "dialogue_id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "turn_id": datasets.Value("int32"),
                    "target_profile_id": datasets.Value("string"),
                    "asr_transcription": datasets.Value("string"),
                    "asr_nbest": datasets.Sequence(datasets.Value("string")),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=8_000),
                }
            ),
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download transcripts for the selected languages plus the audio archive.

        The corpus ships evaluation data only, so a single TEST split is returned.
        """
        text_urls = {lang: _TEXT_URL[lang] for lang in self.config.languages}
        text_paths = dl_manager.download_and_extract(text_urls)
        audio_data_path = dl_manager.download_and_extract(_AUDIO_DATA_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_data_path": audio_data_path,
                    "text_paths": text_paths,
                },
            )
        ]

    def _generate_examples(self, audio_data_path, text_paths):
        """Yield one (key, example) pair per dialogue turn listed in the TSVs."""
        for lang, text_path in text_paths.items():
            with open(text_path, encoding="utf-8") as tsv_file:
                rows = csv.DictReader(
                    tsv_file, delimiter="\t", skipinitialspace=True
                )
                for row in rows:
                    dialogue_id = row["dialogue_id"]
                    turn_id = row["turn_num"]
                    # Relative path of this turn's recording inside the archive;
                    # unique across languages, so it doubles as the example key.
                    file_path = os.path.join(
                        "audios", lang, dialogue_id, f"{turn_id}.wav"
                    )
                    yield file_path, {
                        "language": lang,
                        "dialogue_id": dialogue_id,
                        "speaker_id": row["speaker_id"],
                        "turn_id": turn_id,
                        "target_profile_id": row["scenario_id"],
                        "asr_transcription": row["transcription"],
                        "asr_nbest": json.loads(row["nbest"]),
                        "path": file_path,
                        "audio": str(os.path.join(audio_data_path, file_path)),
                    }