"""JMultiWOZ: Japanese Multi-Domain Wizard-of-Oz dataset for task-oriented dialogue modelling"""

import json
import os

import datasets

_CITATION = """\
@inproceedings{ohashi-etal-2024-jmultiwoz,
    title = "JMultiWOZ: A Large-Scale Japanese Multi-Domain Task-Oriented Dialogue Dataset",
    author = "Ohashi, Atsumoto and Hirai, Ryu and Iizuka, Shinya and Higashinaka, Ryuichiro",
    booktitle = "Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation",
    year = "2024",
    url = "",
    pages = "",
}
"""

_DESCRIPTION = """\
JMultiWOZ is a large-scale Japanese multi-domain task-oriented dialogue dataset. It was collected with the
Wizard-of-Oz (WoZ) methodology, in which two human annotators play the roles of the user and the system. The
dataset contains 4,246 dialogues across 6 domains: restaurant, hotel, attraction, shopping, taxi, and weather.
Available annotations include user goals, dialogue states, and utterances.
"""

_HOMEPAGE = "https://github.com/nu-dialogue/jmultiwoz"

_LICENSE = "CC BY-SA 4.0"

_URLS = {
    "original_zip": "https://github.com/nu-dialogue/jmultiwoz/raw/master/dataset/JMultiWOZ_1.0.zip",
}


def _flatten_value(values) -> str:
    """Recursively flatten a (possibly nested) list of values into a single bracketed string."""
    if not isinstance(values, list):
        return values
    flat_values = [
        _flatten_value(v) if isinstance(v, list) else v for v in values
    ]
    return "[" + ", ".join(flat_values) + "]"
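
# A minimal sketch of the intended behaviour of `_flatten_value` (the inputs
# below are illustrative only, not values taken from the dataset):
#
#     _flatten_value("osaka")                  -> "osaka"
#     _flatten_value(["a", "b"])               -> "[a, b]"
#     _flatten_value([["mon", "tue"], "wed"])  -> "[[mon, tue], wed]"
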
class JMultiWOZDataset(datasets.GeneratorBasedBuilder):
    """JMultiWOZ: Japanese Multi-Domain Wizard-of-Oz dataset for task-oriented dialogue modelling."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        # Goals, goal descriptions, and per-turn dialogue states are flattened into
        # sequences of {domain, slot, value}-style records so that they fit a fixed
        # `datasets` feature schema.
        features = datasets.Features({
            "dialogue_id": datasets.Value("int32"),
            "dialogue_name": datasets.Value("string"),
            "system_name": datasets.Value("string"),
            "user_name": datasets.Value("string"),
            "goal": datasets.Sequence({
                "domain": datasets.Value("string"),
                "task": datasets.Value("string"),
                "slot": datasets.Value("string"),
                "value": datasets.Value("string"),
            }),
            "goal_description": datasets.Sequence({
                "domain": datasets.Value("string"),
                "text": datasets.Value("string"),
            }),
            "turns": datasets.Sequence({
                "turn_id": datasets.Value("int32"),
                "speaker": datasets.Value("string"),
                "utterance": datasets.Value("string"),
                "dialogue_state": {
                    "belief_state": datasets.Sequence({
                        "domain": datasets.Value("string"),
                        "slot": datasets.Value("string"),
                        "value": datasets.Value("string"),
                    }),
                    "book_state": datasets.Sequence({
                        "domain": datasets.Value("string"),
                        "slot": datasets.Value("string"),
                        "value": datasets.Value("string"),
                    }),
                    "db_result": {
                        "candidate_entities": datasets.Sequence(datasets.Value("string")),
                        "active_entity": datasets.Sequence({
                            "slot": datasets.Value("string"),
                            "value": datasets.Value("string"),
                        }),
                    },
                    "book_result": datasets.Sequence({
                        "domain": datasets.Value("string"),
                        "success": datasets.Value("string"),
                        "ref": datasets.Value("string"),
                    }),
                },
            }),
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download and extract the official JMultiWOZ 1.0 archive, then load the
        # dialogue annotations and the train/dev/test split definition.
        data_dir = dl_manager.download_and_extract(_URLS["original_zip"])
        split_list_path = os.path.join(data_dir, "JMultiWOZ_1.0/split_list.json")
        dialogues_path = os.path.join(data_dir, "JMultiWOZ_1.0/dialogues.json")
        with open(split_list_path, "r", encoding="utf-8") as f:
            split_list = json.load(f)
        with open(dialogues_path, "r", encoding="utf-8") as f:
            dialogues = json.load(f)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "dialogues": [dialogues[dialogue_name] for dialogue_name in split_list["train"]],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "dialogues": [dialogues[dialogue_name] for dialogue_name in split_list["dev"]],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "dialogues": [dialogues[dialogue_name] for dialogue_name in split_list["test"]],
                },
            ),
        ]
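
    # Rough shape of the raw files consumed above (a sketch inferred from how they
    # are used in this script; names and values are illustrative, not actual
    # dataset content):
    #
    #     split_list.json: {"train": [<dialogue_name>, ...], "dev": [...], "test": [...]}
    #     dialogues.json:  {<dialogue_name>: {"dialogue_id": ..., "dialogue_name": ...,
    #                       "system_name": ..., "user_name": ..., "goal": {...},
    #                       "goal_description": {...}, "turns": [...]}, ...}
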
    def _generate_examples(self, dialogues):
        for id_, dialogue in enumerate(dialogues):
            example = {
                "dialogue_id": dialogue["dialogue_id"],
                "dialogue_name": dialogue["dialogue_name"],
                "system_name": dialogue["system_name"],
                "user_name": dialogue["user_name"],
                "goal": [],
                "goal_description": [],
                "turns": [],
            }

            # Flatten the per-domain goal into (domain, task, slot, value) records.
            # "reqt" tasks only list the requested slots, so their values are set to None.
            for domain, tasks in dialogue["goal"].items():
                for task, slot_values in tasks.items():
                    if task == "reqt":
                        slot_values = {slot: None for slot in slot_values}
                    for slot, value in slot_values.items():
                        example["goal"].append({
                            "domain": domain,
                            "task": task,
                            "slot": slot,
                            "value": value,
                        })

            for domain, texts in dialogue["goal_description"].items():
                for text in texts:
                    example["goal_description"].append({
                        "domain": domain,
                        "text": text,
                    })

            # Dialogue-state annotations exist only on SYSTEM turns; USER turns keep
            # empty state containers so that every turn shares the same schema.
            for turn in dialogue["turns"]:
                example_turn = {
                    "turn_id": turn["turn_id"],
                    "speaker": turn["speaker"],
                    "utterance": turn["utterance"],
                    "dialogue_state": {
                        "belief_state": [],
                        "book_state": [],
                        "db_result": {},
                        "book_result": [],
                    },
                }
                if turn["speaker"] == "SYSTEM":
                    for domain, slots in turn["dialogue_state"]["belief_state"].items():
                        for slot, value in slots.items():
                            example_turn["dialogue_state"]["belief_state"].append({
                                "domain": domain,
                                "slot": slot,
                                "value": value,
                            })

                    for domain, slots in turn["dialogue_state"]["book_state"].items():
                        for slot, value in slots.items():
                            example_turn["dialogue_state"]["book_state"].append({
                                "domain": domain,
                                "slot": slot,
                                "value": value,
                            })

                    # The active entity may be missing; nested list values are
                    # flattened into strings to match the feature schema.
                    candidate_entities = turn["dialogue_state"]["db_result"]["candidate_entities"]
                    active_entity = turn["dialogue_state"]["db_result"]["active_entity"]
                    if not active_entity:
                        active_entity = {}
                    example_turn["dialogue_state"]["db_result"] = {
                        "candidate_entities": candidate_entities,
                        "active_entity": [{
                            "slot": slot,
                            "value": _flatten_value(value),
                        } for slot, value in active_entity.items()],
                    }

                    for domain, result in turn["dialogue_state"]["book_result"].items():
                        example_turn["dialogue_state"]["book_result"].append({
                            "domain": domain,
                            "success": result["success"],
                            "ref": result["ref"],
                        })

                example["turns"].append(example_turn)

            yield id_, example
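

# -----------------------------------------------------------------------------
# Usage sketch (not part of the loading script itself): assuming this file is
# saved locally as `jmultiwoz.py`, the splits defined above can be loaded with
# the `datasets` library roughly as follows (newer `datasets` versions may also
# require `trust_remote_code=True`). The local path is an assumption; point it
# at wherever this script actually lives.
#
#     import datasets
#     jmultiwoz = datasets.load_dataset("path/to/jmultiwoz.py")
#     print(jmultiwoz["train"][0]["dialogue_name"])
#     print(jmultiwoz["train"][0]["turns"]["utterance"][:2])
# -----------------------------------------------------------------------------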