Delete legacy JSON metadata

#3
by albertvillanova (HF staff) · opened
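The file removed here duplicated per-config metadata (features, class labels, splits, sizes, checksums) that the `datasets` library can expose programmatically. A minimal sketch of inspecting that metadata after the deletion, assuming the `datasets` package is installed and the `silicone` dataset on the Hub remains loadable:

```python
# Sketch: inspect the metadata that dataset_infos.json used to carry,
# assuming the `datasets` library is installed and the repo stays loadable.
from datasets import get_dataset_config_names, load_dataset_builder

# List all configurations of the SILICONE benchmark (dyda_da, dyda_e, iemocap, ...).
configs = get_dataset_config_names("silicone")
print(configs)

# The builder exposes the same fields the deleted JSON stored per config.
builder = load_dataset_builder("silicone", "dyda_da")
info = builder.info
print(info.features)   # Utterance, Dialogue_Act, Dialogue_ID, Label, Idx
print(info.splits)     # train / validation / test (may be None if no metadata is published)
print(info.download_size, info.dataset_size)
```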
Files changed (1)
  1. dataset_infos.json +0 -1
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"dyda_da": {"description": "The Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE (SILICONE) benchmark is a collection\n of resources for training, evaluating, and analyzing natural language understanding systems\n specifically designed for spoken language. All datasets are in the English language and cover a\n variety of domains including daily life, scripted scenarios, joint task completion, phone call\n conversations, and televsion dialogue. Some datasets additionally include emotion and/or sentimant\n labels.\n", "citation": "@InProceedings{li2017dailydialog,\nauthor = {Li, Yanran and Su, Hui and Shen, Xiaoyu and Li, Wenjie and Cao, Ziqiang and Niu, Shuzi},\ntitle = {DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset},\nbooktitle = {Proceedings of The 8th International Joint Conference on Natural Language Processing (IJCNLP 2017)},\nyear = {2017}\n}\n@inproceedings{chapuis-etal-2020-hierarchical,\n title = \"Hierarchical Pre-training for Sequence Labelling in Spoken Dialog\",\n author = \"Chapuis, Emile and\n Colombo, Pierre and\n Manica, Matteo and\n Labeau, Matthieu and\n Clavel, Chlo{'e}\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2020\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.findings-emnlp.239\",\n doi = \"10.18653/v1/2020.findings-emnlp.239\",\n pages = \"2636--2648\",\n abstract = \"Sequence labelling tasks like Dialog Act and Emotion/Sentiment identification are a\n key component of spoken dialog systems. In this work, we propose a new approach to learn\n generic representations adapted to spoken dialog, which we evaluate on a new benchmark we\n call Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE benchmark (SILICONE).\n SILICONE is model-agnostic and contains 10 different datasets of various sizes.\n We obtain our representations with a hierarchical encoder based on transformer architectures,\n for which we extend two well-known pre-training objectives. Pre-training is performed on\n OpenSubtitles: a large corpus of spoken dialog containing over 2.3 billion of tokens. 
We\n demonstrate how hierarchical encoders achieve competitive results with consistently fewer\n parameters compared to state-of-the-art models and we show their importance for both\n pre-training and fine-tuning.\",\n}\n", "homepage": "http://yanran.li/dailydialog.html", "license": "", "features": {"Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_Act": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 4, "names": ["commissive", "directive", "inform", "question"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "silicone", "config_name": "dyda_da", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8346638, "num_examples": 87170, "dataset_name": "silicone"}, "validation": {"name": "validation", "num_bytes": 764277, "num_examples": 8069, "dataset_name": "silicone"}, "test": {"name": "test", "num_bytes": 740226, "num_examples": 7740, "dataset_name": "silicone"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/dyda/train.csv": {"num_bytes": 7519783, "checksum": "9710558dd08351e1544424fcf43f8aed5a3e99daba3e1f133e86acf2373fb4c6"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/dyda/dev.csv": {"num_bytes": 688589, "checksum": "66be9ec11f0234686cd338d5e7a374e1706ffd998b54fd3e6b72ef18598470b2"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/dyda/test.csv": {"num_bytes": 666553, "checksum": "3b711ebac2a1a64067aea5252c6bbbb1abe1f9f1434d2e94f9dd4a99706fc2ce"}}, "download_size": 8874925, "post_processing_size": null, "dataset_size": 9851141, "size_in_bytes": 18726066}, "dyda_e": {"description": "The Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE (SILICONE) benchmark is a collection\n of resources for training, evaluating, and analyzing natural language understanding systems\n specifically designed for spoken language. All datasets are in the English language and cover a\n variety of domains including daily life, scripted scenarios, joint task completion, phone call\n conversations, and televsion dialogue. Some datasets additionally include emotion and/or sentimant\n labels.\n", "citation": "@InProceedings{li2017dailydialog,\nauthor = {Li, Yanran and Su, Hui and Shen, Xiaoyu and Li, Wenjie and Cao, Ziqiang and Niu, Shuzi},\ntitle = {DailyDialog: A Manually Labelled Multi-turn Dialogue Dataset},\nbooktitle = {Proceedings of The 8th International Joint Conference on Natural Language Processing (IJCNLP 2017)},\nyear = {2017}\n}\n@inproceedings{chapuis-etal-2020-hierarchical,\n title = \"Hierarchical Pre-training for Sequence Labelling in Spoken Dialog\",\n author = \"Chapuis, Emile and\n Colombo, Pierre and\n Manica, Matteo and\n Labeau, Matthieu and\n Clavel, Chlo{'e}\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2020\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.findings-emnlp.239\",\n doi = \"10.18653/v1/2020.findings-emnlp.239\",\n pages = \"2636--2648\",\n abstract = \"Sequence labelling tasks like Dialog Act and Emotion/Sentiment identification are a\n key component of spoken dialog systems. 
In this work, we propose a new approach to learn\n generic representations adapted to spoken dialog, which we evaluate on a new benchmark we\n call Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE benchmark (SILICONE).\n SILICONE is model-agnostic and contains 10 different datasets of various sizes.\n We obtain our representations with a hierarchical encoder based on transformer architectures,\n for which we extend two well-known pre-training objectives. Pre-training is performed on\n OpenSubtitles: a large corpus of spoken dialog containing over 2.3 billion of tokens. We\n demonstrate how hierarchical encoders achieve competitive results with consistently fewer\n parameters compared to state-of-the-art models and we show their importance for both\n pre-training and fine-tuning.\",\n}\n", "homepage": "http://yanran.li/dailydialog.html", "license": "", "features": {"Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Emotion": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 7, "names": ["anger", "disgust", "fear", "happiness", "no emotion", "sadness", "surprise"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "silicone", "config_name": "dyda_e", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 8547111, "num_examples": 87170, "dataset_name": "silicone"}, "validation": {"name": "validation", "num_bytes": 781445, "num_examples": 8069, "dataset_name": "silicone"}, "test": {"name": "test", "num_bytes": 757670, "num_examples": 7740, "dataset_name": "silicone"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/dyda/train.csv": {"num_bytes": 7519783, "checksum": "9710558dd08351e1544424fcf43f8aed5a3e99daba3e1f133e86acf2373fb4c6"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/dyda/dev.csv": {"num_bytes": 688589, "checksum": "66be9ec11f0234686cd338d5e7a374e1706ffd998b54fd3e6b72ef18598470b2"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/dyda/test.csv": {"num_bytes": 666553, "checksum": "3b711ebac2a1a64067aea5252c6bbbb1abe1f9f1434d2e94f9dd4a99706fc2ce"}}, "download_size": 8874925, "post_processing_size": null, "dataset_size": 10086226, "size_in_bytes": 18961151}, "iemocap": {"description": "The Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE (SILICONE) benchmark is a collection\n of resources for training, evaluating, and analyzing natural language understanding systems\n specifically designed for spoken language. All datasets are in the English language and cover a\n variety of domains including daily life, scripted scenarios, joint task completion, phone call\n conversations, and televsion dialogue. 
Some datasets additionally include emotion and/or sentimant\n labels.\n", "citation": "@article{busso2008iemocap,\ntitle={IEMOCAP: Interactive emotional dyadic motion capture database},\nauthor={Busso, Carlos and Bulut, Murtaza and Lee, Chi-Chun and Kazemzadeh, Abe and Mower,\nEmily and Kim, Samuel and Chang, Jeannette N and Lee, Sungbok and Narayanan, Shrikanth S},\njournal={Language resources and evaluation},\nvolume={42},\nnumber={4},\npages={335},\nyear={2008},\npublisher={Springer}\n}\n@inproceedings{chapuis-etal-2020-hierarchical,\n title = \"Hierarchical Pre-training for Sequence Labelling in Spoken Dialog\",\n author = \"Chapuis, Emile and\n Colombo, Pierre and\n Manica, Matteo and\n Labeau, Matthieu and\n Clavel, Chlo{'e}\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2020\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.findings-emnlp.239\",\n doi = \"10.18653/v1/2020.findings-emnlp.239\",\n pages = \"2636--2648\",\n abstract = \"Sequence labelling tasks like Dialog Act and Emotion/Sentiment identification are a\n key component of spoken dialog systems. In this work, we propose a new approach to learn\n generic representations adapted to spoken dialog, which we evaluate on a new benchmark we\n call Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE benchmark (SILICONE).\n SILICONE is model-agnostic and contains 10 different datasets of various sizes.\n We obtain our representations with a hierarchical encoder based on transformer architectures,\n for which we extend two well-known pre-training objectives. Pre-training is performed on\n OpenSubtitles: a large corpus of spoken dialog containing over 2.3 billion of tokens. 
We\n demonstrate how hierarchical encoders achieve competitive results with consistently fewer\n parameters compared to state-of-the-art models and we show their importance for both\n pre-training and fine-tuning.\",\n}\n", "homepage": "https://sail.usc.edu/iemocap/", "license": "", "features": {"Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Utterance_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Emotion": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 11, "names": ["ang", "dis", "exc", "fea", "fru", "hap", "neu", "oth", "sad", "sur", "xxx"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "silicone", "config_name": "iemocap", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 908180, "num_examples": 7213, "dataset_name": "silicone"}, "validation": {"name": "validation", "num_bytes": 100969, "num_examples": 805, "dataset_name": "silicone"}, "test": {"name": "test", "num_bytes": 254248, "num_examples": 2021, "dataset_name": "silicone"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/iemocap/train.csv": {"num_bytes": 834130, "checksum": "8bd85ac4c3081b9ab9a4607dc5726b563d7ace1dcc7b427ac751ed54d853086b"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/iemocap/dev.csv": {"num_bytes": 92272, "checksum": "8133acf20055d1e82733f70aa2dd381b01d2749909d77b2da8f32444c964eb51"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/iemocap/test.csv": {"num_bytes": 232376, "checksum": "7749583b50747f584ce4e8b7c137c546107f37c203c4c9a342ab03b8a4fa4406"}}, "download_size": 1158778, "post_processing_size": null, "dataset_size": 1263397, "size_in_bytes": 2422175}, "maptask": {"description": "The Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE (SILICONE) benchmark is a collection\n of resources for training, evaluating, and analyzing natural language understanding systems\n specifically designed for spoken language. All datasets are in the English language and cover a\n variety of domains including daily life, scripted scenarios, joint task completion, phone call\n conversations, and televsion dialogue. 
Some datasets additionally include emotion and/or sentimant\n labels.\n", "citation": "@inproceedings{thompson1993hcrc,\ntitle={The HCRC map task corpus: natural dialogue for speech recognition},\nauthor={Thompson, Henry S and Anderson, Anne H and Bard, Ellen Gurman and Doherty-Sneddon,\nGwyneth and Newlands, Alison and Sotillo, Cathy},\nbooktitle={HUMAN LANGUAGE TECHNOLOGY: Proceedings of a Workshop Held at Plainsboro, New Jersey, March 21-24, 1993},\nyear={1993}\n}\n@inproceedings{chapuis-etal-2020-hierarchical,\n title = \"Hierarchical Pre-training for Sequence Labelling in Spoken Dialog\",\n author = \"Chapuis, Emile and\n Colombo, Pierre and\n Manica, Matteo and\n Labeau, Matthieu and\n Clavel, Chlo{'e}\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2020\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.findings-emnlp.239\",\n doi = \"10.18653/v1/2020.findings-emnlp.239\",\n pages = \"2636--2648\",\n abstract = \"Sequence labelling tasks like Dialog Act and Emotion/Sentiment identification are a\n key component of spoken dialog systems. In this work, we propose a new approach to learn\n generic representations adapted to spoken dialog, which we evaluate on a new benchmark we\n call Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE benchmark (SILICONE).\n SILICONE is model-agnostic and contains 10 different datasets of various sizes.\n We obtain our representations with a hierarchical encoder based on transformer architectures,\n for which we extend two well-known pre-training objectives. Pre-training is performed on\n OpenSubtitles: a large corpus of spoken dialog containing over 2.3 billion of tokens. 
We\n demonstrate how hierarchical encoders achieve competitive results with consistently fewer\n parameters compared to state-of-the-art models and we show their importance for both\n pre-training and fine-tuning.\",\n}\n", "homepage": "http://groups.inf.ed.ac.uk/maptask/", "license": "", "features": {"Speaker": {"dtype": "string", "id": null, "_type": "Value"}, "Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_Act": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 12, "names": ["acknowledge", "align", "check", "clarify", "explain", "instruct", "query_w", "query_yn", "ready", "reply_n", "reply_w", "reply_y"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "silicone", "config_name": "maptask", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1260413, "num_examples": 20905, "dataset_name": "silicone"}, "validation": {"name": "validation", "num_bytes": 178184, "num_examples": 2963, "dataset_name": "silicone"}, "test": {"name": "test", "num_bytes": 171806, "num_examples": 2894, "dataset_name": "silicone"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/maptask/train.txt": {"num_bytes": 821386, "checksum": "be52bd3de9c5fe134edba6d45ed112285f6db271dbe69a5a93557e6a0ef625fa"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/maptask/dev.txt": {"num_bytes": 115951, "checksum": "92f86ce1b15f1e05e16cee83732494ded4f572e0a474dd64ea952658601ccee7"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/maptask/test.txt": {"num_bytes": 111020, "checksum": "4758898044e6849c29084971bae9957ae06854e6606682b5d77984b5b861f87b"}}, "download_size": 1048357, "post_processing_size": null, "dataset_size": 1610403, "size_in_bytes": 2658760}, "meld_e": {"description": "The Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE (SILICONE) benchmark is a collection\n of resources for training, evaluating, and analyzing natural language understanding systems\n specifically designed for spoken language. All datasets are in the English language and cover a\n variety of domains including daily life, scripted scenarios, joint task completion, phone call\n conversations, and televsion dialogue. Some datasets additionally include emotion and/or sentimant\n labels.\n", "citation": "@article{chen2018emotionlines,\ntitle={Emotionlines: An emotion corpus of multi-party conversations},\nauthor={Chen, Sheng-Yeh and Hsu, Chao-Chun and Kuo, Chuan-Chun and Ku, Lun-Wei and others},\njournal={arXiv preprint arXiv:1802.08379},\nyear={2018}\n}\n@inproceedings{chapuis-etal-2020-hierarchical,\n title = \"Hierarchical Pre-training for Sequence Labelling in Spoken Dialog\",\n author = \"Chapuis, Emile and\n Colombo, Pierre and\n Manica, Matteo and\n Labeau, Matthieu and\n Clavel, Chlo{'e}\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2020\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.findings-emnlp.239\",\n doi = \"10.18653/v1/2020.findings-emnlp.239\",\n pages = \"2636--2648\",\n abstract = \"Sequence labelling tasks like Dialog Act and Emotion/Sentiment identification are a\n key component of spoken dialog systems. 
In this work, we propose a new approach to learn\n generic representations adapted to spoken dialog, which we evaluate on a new benchmark we\n call Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE benchmark (SILICONE).\n SILICONE is model-agnostic and contains 10 different datasets of various sizes.\n We obtain our representations with a hierarchical encoder based on transformer architectures,\n for which we extend two well-known pre-training objectives. Pre-training is performed on\n OpenSubtitles: a large corpus of spoken dialog containing over 2.3 billion of tokens. We\n demonstrate how hierarchical encoders achieve competitive results with consistently fewer\n parameters compared to state-of-the-art models and we show their importance for both\n pre-training and fine-tuning.\",\n}\n", "homepage": "https://affective-meld.github.io/", "license": "", "features": {"Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Speaker": {"dtype": "string", "id": null, "_type": "Value"}, "Emotion": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Utterance_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 7, "names": ["anger", "disgust", "fear", "joy", "neutral", "sadness", "surprise"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "silicone", "config_name": "meld_e", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 916337, "num_examples": 9989, "dataset_name": "silicone"}, "validation": {"name": "validation", "num_bytes": 100234, "num_examples": 1109, "dataset_name": "silicone"}, "test": {"name": "test", "num_bytes": 242352, "num_examples": 2610, "dataset_name": "silicone"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/meld/train.csv": {"num_bytes": 1131992, "checksum": "e734e07bba181798ac658d0a03d42d784a4fec0f7f9e48c996fc9bce7d3f7f20"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/meld/dev.csv": {"num_bytes": 123198, "checksum": "a69e5b1b50cd683432aefa7f1bdf355c581f05bd1b387bcec0dd2129a794ea1c"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/meld/test.csv": {"num_bytes": 297824, "checksum": "89d4948730222969a361909086d1d77a3c55d0dbb9e96da2ad90a5dc78b5077d"}}, "download_size": 1553014, "post_processing_size": null, "dataset_size": 1258923, "size_in_bytes": 2811937}, "meld_s": {"description": "The Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE (SILICONE) benchmark is a collection\n of resources for training, evaluating, and analyzing natural language understanding systems\n specifically designed for spoken language. All datasets are in the English language and cover a\n variety of domains including daily life, scripted scenarios, joint task completion, phone call\n conversations, and televsion dialogue. 
Some datasets additionally include emotion and/or sentimant\n labels.\n", "citation": "@article{chen2018emotionlines,\ntitle={Emotionlines: An emotion corpus of multi-party conversations},\nauthor={Chen, Sheng-Yeh and Hsu, Chao-Chun and Kuo, Chuan-Chun and Ku, Lun-Wei and others},\njournal={arXiv preprint arXiv:1802.08379},\nyear={2018}\n}\n@inproceedings{chapuis-etal-2020-hierarchical,\n title = \"Hierarchical Pre-training for Sequence Labelling in Spoken Dialog\",\n author = \"Chapuis, Emile and\n Colombo, Pierre and\n Manica, Matteo and\n Labeau, Matthieu and\n Clavel, Chlo{'e}\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2020\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.findings-emnlp.239\",\n doi = \"10.18653/v1/2020.findings-emnlp.239\",\n pages = \"2636--2648\",\n abstract = \"Sequence labelling tasks like Dialog Act and Emotion/Sentiment identification are a\n key component of spoken dialog systems. In this work, we propose a new approach to learn\n generic representations adapted to spoken dialog, which we evaluate on a new benchmark we\n call Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE benchmark (SILICONE).\n SILICONE is model-agnostic and contains 10 different datasets of various sizes.\n We obtain our representations with a hierarchical encoder based on transformer architectures,\n for which we extend two well-known pre-training objectives. Pre-training is performed on\n OpenSubtitles: a large corpus of spoken dialog containing over 2.3 billion of tokens. We\n demonstrate how hierarchical encoders achieve competitive results with consistently fewer\n parameters compared to state-of-the-art models and we show their importance for both\n pre-training and fine-tuning.\",\n}\n", "homepage": "https://affective-meld.github.io/", "license": "", "features": {"Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Speaker": {"dtype": "string", "id": null, "_type": "Value"}, "Sentiment": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Utterance_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 3, "names": ["negative", "neutral", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "silicone", "config_name": "meld_s", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 930405, "num_examples": 9989, "dataset_name": "silicone"}, "validation": {"name": "validation", "num_bytes": 101801, "num_examples": 1109, "dataset_name": "silicone"}, "test": {"name": "test", "num_bytes": 245873, "num_examples": 2610, "dataset_name": "silicone"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/meld/train.csv": {"num_bytes": 1131992, "checksum": "e734e07bba181798ac658d0a03d42d784a4fec0f7f9e48c996fc9bce7d3f7f20"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/meld/dev.csv": {"num_bytes": 123198, "checksum": "a69e5b1b50cd683432aefa7f1bdf355c581f05bd1b387bcec0dd2129a794ea1c"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/meld/test.csv": {"num_bytes": 297824, "checksum": 
"89d4948730222969a361909086d1d77a3c55d0dbb9e96da2ad90a5dc78b5077d"}}, "download_size": 1553014, "post_processing_size": null, "dataset_size": 1278079, "size_in_bytes": 2831093}, "mrda": {"description": "The Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE (SILICONE) benchmark is a collection\n of resources for training, evaluating, and analyzing natural language understanding systems\n specifically designed for spoken language. All datasets are in the English language and cover a\n variety of domains including daily life, scripted scenarios, joint task completion, phone call\n conversations, and televsion dialogue. Some datasets additionally include emotion and/or sentimant\n labels.\n", "citation": "@techreport{shriberg2004icsi,\ntitle={The ICSI meeting recorder dialog act (MRDA) corpus},\nauthor={Shriberg, Elizabeth and Dhillon, Raj and Bhagat, Sonali and Ang, Jeremy and Carvey, Hannah},\nyear={2004},\ninstitution={INTERNATIONAL COMPUTER SCIENCE INST BERKELEY CA}\n}\n@inproceedings{chapuis-etal-2020-hierarchical,\n title = \"Hierarchical Pre-training for Sequence Labelling in Spoken Dialog\",\n author = \"Chapuis, Emile and\n Colombo, Pierre and\n Manica, Matteo and\n Labeau, Matthieu and\n Clavel, Chlo{'e}\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2020\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.findings-emnlp.239\",\n doi = \"10.18653/v1/2020.findings-emnlp.239\",\n pages = \"2636--2648\",\n abstract = \"Sequence labelling tasks like Dialog Act and Emotion/Sentiment identification are a\n key component of spoken dialog systems. In this work, we propose a new approach to learn\n generic representations adapted to spoken dialog, which we evaluate on a new benchmark we\n call Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE benchmark (SILICONE).\n SILICONE is model-agnostic and contains 10 different datasets of various sizes.\n We obtain our representations with a hierarchical encoder based on transformer architectures,\n for which we extend two well-known pre-training objectives. Pre-training is performed on\n OpenSubtitles: a large corpus of spoken dialog containing over 2.3 billion of tokens. 
We\n demonstrate how hierarchical encoders achieve competitive results with consistently fewer\n parameters compared to state-of-the-art models and we show their importance for both\n pre-training and fine-tuning.\",\n}\n", "homepage": "https://www.aclweb.org/anthology/W04-2319", "license": "", "features": {"Utterance_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_Act": {"dtype": "string", "id": null, "_type": "Value"}, "Channel_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Speaker": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 5, "names": ["s", "d", "b", "f", "q"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "silicone", "config_name": "mrda", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 9998857, "num_examples": 83943, "dataset_name": "silicone"}, "validation": {"name": "validation", "num_bytes": 1143286, "num_examples": 9815, "dataset_name": "silicone"}, "test": {"name": "test", "num_bytes": 1807462, "num_examples": 15470, "dataset_name": "silicone"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/mrda/train.csv": {"num_bytes": 7977039, "checksum": "d7a963aac70eb80d315b76b9e71d82a7888532ef8c6752c84487e34dfce3b7eb"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/mrda/dev.csv": {"num_bytes": 903718, "checksum": "dc795c60b3825645a20dbb0700e279271fc52eef4bc297564a0227f608a19619"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/mrda/test.csv": {"num_bytes": 1425091, "checksum": "8426417d316cb0aa57f13e46e866e8964eed71f719212ab1da1c3cbaa23ea414"}}, "download_size": 10305848, "post_processing_size": null, "dataset_size": 12949605, "size_in_bytes": 23255453}, "oasis": {"description": "The Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE (SILICONE) benchmark is a collection\n of resources for training, evaluating, and analyzing natural language understanding systems\n specifically designed for spoken language. All datasets are in the English language and cover a\n variety of domains including daily life, scripted scenarios, joint task completion, phone call\n conversations, and televsion dialogue. 
Some datasets additionally include emotion and/or sentimant\n labels.\n", "citation": "@inproceedings{leech2003generic,\ntitle={Generic speech act annotation for task-oriented dialogues},\nauthor={Leech, Geoffrey and Weisser, Martin},\nbooktitle={Proceedings of the corpus linguistics 2003 conference},\nvolume={16},\npages={441--446},\nyear={2003},\norganization={Lancaster: Lancaster University}\n}\n@inproceedings{chapuis-etal-2020-hierarchical,\n title = \"Hierarchical Pre-training for Sequence Labelling in Spoken Dialog\",\n author = \"Chapuis, Emile and\n Colombo, Pierre and\n Manica, Matteo and\n Labeau, Matthieu and\n Clavel, Chlo{'e}\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2020\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.findings-emnlp.239\",\n doi = \"10.18653/v1/2020.findings-emnlp.239\",\n pages = \"2636--2648\",\n abstract = \"Sequence labelling tasks like Dialog Act and Emotion/Sentiment identification are a\n key component of spoken dialog systems. In this work, we propose a new approach to learn\n generic representations adapted to spoken dialog, which we evaluate on a new benchmark we\n call Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE benchmark (SILICONE).\n SILICONE is model-agnostic and contains 10 different datasets of various sizes.\n We obtain our representations with a hierarchical encoder based on transformer architectures,\n for which we extend two well-known pre-training objectives. Pre-training is performed on\n OpenSubtitles: a large corpus of spoken dialog containing over 2.3 billion of tokens. We\n demonstrate how hierarchical encoders achieve competitive results with consistently fewer\n parameters compared to state-of-the-art models and we show their importance for both\n pre-training and fine-tuning.\",\n}\n", "homepage": "http://groups.inf.ed.ac.uk/oasis/", "license": "", "features": {"Speaker": {"dtype": "string", "id": null, "_type": "Value"}, "Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_Act": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 42, "names": ["accept", "ackn", "answ", "answElab", "appreciate", "backch", "bye", "complete", "confirm", "correct", "direct", "directElab", "echo", "exclaim", "expressOpinion", "expressPossibility", "expressRegret", "expressWish", "greet", "hold", "identifySelf", "inform", "informCont", "informDisc", "informIntent", "init", "negate", "offer", "pardon", "raiseIssue", "refer", "refuse", "reqDirect", "reqInfo", "reqModal", "selfTalk", "suggest", "thank", "informIntent-hold", "correctSelf", "expressRegret-inform", "thank-identifySelf"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "silicone", "config_name": "oasis", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 887018, "num_examples": 12076, "dataset_name": "silicone"}, "validation": {"name": "validation", "num_bytes": 112185, "num_examples": 1513, "dataset_name": "silicone"}, "test": {"name": "test", "num_bytes": 119254, "num_examples": 1478, "dataset_name": "silicone"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/oasis/train.txt": {"num_bytes": 633398, "checksum": 
"5a6a42cc47e0276afde1189dbc604fb8c0cc46afab2b49de1c2c6d9b61d5ce16"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/oasis/dev.txt": {"num_bytes": 80400, "checksum": "a8d4e7d6fa8e582c80c7e3aef0d4a73fc9ac38a18f9aab10bac723ee45489c8e"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/oasis/test.txt": {"num_bytes": 88204, "checksum": "a22790b90f99b92d28a5beda6d0a8a7d107c195cb19bdfc4ce3f1df4f2ca901b"}}, "download_size": 802002, "post_processing_size": null, "dataset_size": 1118457, "size_in_bytes": 1920459}, "sem": {"description": "The Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE (SILICONE) benchmark is a collection\n of resources for training, evaluating, and analyzing natural language understanding systems\n specifically designed for spoken language. All datasets are in the English language and cover a\n variety of domains including daily life, scripted scenarios, joint task completion, phone call\n conversations, and televsion dialogue. Some datasets additionally include emotion and/or sentimant\n labels.\n", "citation": "@article{mckeown2011semaine,\ntitle={The semaine database: Annotated multimodal records of emotionally colored conversations\nbetween a person and a limited agent},\nauthor={McKeown, Gary and Valstar, Michel and Cowie, Roddy and Pantic, Maja and Schroder, Marc},\njournal={IEEE transactions on affective computing},\nvolume={3},\nnumber={1},\npages={5--17},\nyear={2011},\npublisher={IEEE}\n}\n@inproceedings{chapuis-etal-2020-hierarchical,\n title = \"Hierarchical Pre-training for Sequence Labelling in Spoken Dialog\",\n author = \"Chapuis, Emile and\n Colombo, Pierre and\n Manica, Matteo and\n Labeau, Matthieu and\n Clavel, Chlo{'e}\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2020\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.findings-emnlp.239\",\n doi = \"10.18653/v1/2020.findings-emnlp.239\",\n pages = \"2636--2648\",\n abstract = \"Sequence labelling tasks like Dialog Act and Emotion/Sentiment identification are a\n key component of spoken dialog systems. In this work, we propose a new approach to learn\n generic representations adapted to spoken dialog, which we evaluate on a new benchmark we\n call Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE benchmark (SILICONE).\n SILICONE is model-agnostic and contains 10 different datasets of various sizes.\n We obtain our representations with a hierarchical encoder based on transformer architectures,\n for which we extend two well-known pre-training objectives. Pre-training is performed on\n OpenSubtitles: a large corpus of spoken dialog containing over 2.3 billion of tokens. 
We\n demonstrate how hierarchical encoders achieve competitive results with consistently fewer\n parameters compared to state-of-the-art models and we show their importance for both\n pre-training and fine-tuning.\",\n}\n", "homepage": "https://ieeexplore.ieee.org/document/5959155", "license": "", "features": {"Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "NbPairInSession": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "SpeechTurn": {"dtype": "string", "id": null, "_type": "Value"}, "Speaker": {"dtype": "string", "id": null, "_type": "Value"}, "Sentiment": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 3, "names": ["Negative", "Neutral", "Positive"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "silicone", "config_name": "sem", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 496168, "num_examples": 4264, "dataset_name": "silicone"}, "validation": {"name": "validation", "num_bytes": 57896, "num_examples": 485, "dataset_name": "silicone"}, "test": {"name": "test", "num_bytes": 100072, "num_examples": 878, "dataset_name": "silicone"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/sem/train.csv": {"num_bytes": 389633, "checksum": "9fa9fc0851babf987b2f0c09c507eb46b5e9fab1a8680d8789a4c24af53cde9c"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/sem/dev.csv": {"num_bytes": 45859, "checksum": "43d93b46664e9e250ef59f551e02fdef54501b1bab5d2932bc3a97bf32d6c365"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/sem/test.csv": {"num_bytes": 78197, "checksum": "c8024e6e431776c9b838ad3f23290cb33b4a9d00bdc820e090bd62ef7b4a63db"}}, "download_size": 513689, "post_processing_size": null, "dataset_size": 654136, "size_in_bytes": 1167825}, "swda": {"description": "The Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE (SILICONE) benchmark is a collection\n of resources for training, evaluating, and analyzing natural language understanding systems\n specifically designed for spoken language. All datasets are in the English language and cover a\n variety of domains including daily life, scripted scenarios, joint task completion, phone call\n conversations, and televsion dialogue. 
Some datasets additionally include emotion and/or sentimant\n labels.\n", "citation": "@article{stolcke2000dialogue,\ntitle={Dialogue act modeling for automatic tagging and recognition of conversational speech},\nauthor={Stolcke, Andreas and Ries, Klaus and Coccaro, Noah and Shriberg, Elizabeth and\nBates, Rebecca and Jurafsky, Daniel and Taylor, Paul and Martin, Rachel and Ess-Dykema,\nCarol Van and Meteer, Marie},\njournal={Computational linguistics},\nvolume={26},\nnumber={3},\npages={339--373},\nyear={2000},\npublisher={MIT Press}\n}\n@inproceedings{chapuis-etal-2020-hierarchical,\n title = \"Hierarchical Pre-training for Sequence Labelling in Spoken Dialog\",\n author = \"Chapuis, Emile and\n Colombo, Pierre and\n Manica, Matteo and\n Labeau, Matthieu and\n Clavel, Chlo{'e}\",\n booktitle = \"Findings of the Association for Computational Linguistics: EMNLP 2020\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.findings-emnlp.239\",\n doi = \"10.18653/v1/2020.findings-emnlp.239\",\n pages = \"2636--2648\",\n abstract = \"Sequence labelling tasks like Dialog Act and Emotion/Sentiment identification are a\n key component of spoken dialog systems. In this work, we propose a new approach to learn\n generic representations adapted to spoken dialog, which we evaluate on a new benchmark we\n call Sequence labellIng evaLuatIon benChmark fOr spoken laNguagE benchmark (SILICONE).\n SILICONE is model-agnostic and contains 10 different datasets of various sizes.\n We obtain our representations with a hierarchical encoder based on transformer architectures,\n for which we extend two well-known pre-training objectives. Pre-training is performed on\n OpenSubtitles: a large corpus of spoken dialog containing over 2.3 billion of tokens. 
We\n demonstrate how hierarchical encoders achieve competitive results with consistently fewer\n parameters compared to state-of-the-art models and we show their importance for both\n pre-training and fine-tuning.\",\n}\n", "homepage": "https://web.stanford.edu/~jurafsky/ws97/", "license": "", "features": {"Utterance": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_Act": {"dtype": "string", "id": null, "_type": "Value"}, "From_Caller": {"dtype": "string", "id": null, "_type": "Value"}, "To_Caller": {"dtype": "string", "id": null, "_type": "Value"}, "Topic": {"dtype": "string", "id": null, "_type": "Value"}, "Dialogue_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Conv_ID": {"dtype": "string", "id": null, "_type": "Value"}, "Label": {"num_classes": 46, "names": ["sd", "b", "sv", "%", "aa", "ba", "fc", "qw", "nn", "bk", "h", "qy^d", "bh", "^q", "bf", "fo_o_fw_\"_by_bc", "fo_o_fw_by_bc_\"", "na", "ad", "^2", "b^m", "qo", "qh", "^h", "ar", "ng", "br", "no", "fp", "qrr", "arp_nd", "t3", "oo_co_cc", "aap_am", "t1", "bd", "^g", "qw^d", "fa", "ft", "+", "x", "ny", "sv_fx", "qy_qr", "ba_fe"], "names_file": null, "id": null, "_type": "ClassLabel"}, "Idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "silicone", "config_name": "swda", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 20499788, "num_examples": 190709, "dataset_name": "silicone"}, "validation": {"name": "validation", "num_bytes": 2265898, "num_examples": 21203, "dataset_name": "silicone"}, "test": {"name": "test", "num_bytes": 291471, "num_examples": 2714, "dataset_name": "silicone"}}, "download_checksums": {"https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/swda/train.csv": {"num_bytes": 14420223, "checksum": "37475282ef24b5d53a761f0576619f7c1fb1520b1af7dfaf306f13b6c9b60d57"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/swda/dev.csv": {"num_bytes": 1589677, "checksum": "000a1f0a805f275936748d6a4d61f928b18457c8c9e1e5149c56888d95c866b5"}, "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/swda/test.csv": {"num_bytes": 217600, "checksum": "0584658976bd4b218351630ec282145238ce02d127d76cb31c1dd47ecdfa34f4"}}, "download_size": 16227500, "post_processing_size": null, "dataset_size": 23057157, "size_in_bytes": 39284657}}
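For reference, the download checksums recorded in the deleted file can still be verified against the upstream GitHub CSVs. A small sketch using the `dyda` dev split, with the URL, byte count, and SHA-256 copied from the JSON above and assuming the upstream file is still available unchanged:

```python
# Sketch: spot-check one source file listed under download_checksums above.
import hashlib
import urllib.request

url = "https://raw.githubusercontent.com/eusip/SILICONE-benchmark/main/dyda/dev.csv"
expected_bytes = 688589
expected_sha256 = "66be9ec11f0234686cd338d5e7a374e1706ffd998b54fd3e6b72ef18598470b2"

data = urllib.request.urlopen(url).read()
print(len(data) == expected_bytes)                        # matches num_bytes
print(hashlib.sha256(data).hexdigest() == expected_sha256)  # matches checksum
```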