dataname_column_rename_in_table = {
'librispeech_test_clean' : 'LibriSpeech-Clean',
'librispeech_test_other' : 'LibriSpeech-Other',
'common_voice_15_en_test' : 'CommonVoice-15-EN',
'peoples_speech_test' : 'Peoples-Speech',
'gigaspeech_test' : 'GigaSpeech-1',
'earnings21_test' : 'Earnings-21',
'earnings22_test' : 'Earnings-22',
'tedlium3_test' : 'TED-LIUM-3',
'tedlium3_long_form_test' : 'TED-LIUM-3-Long',
'aishell_asr_zh_test' : 'Aishell-ASR-ZH',
'covost2_en_id_test' : 'CoVoST2-EN-ID',
'covost2_en_zh_test' : 'CoVoST2-EN-ZH',
'covost2_en_ta_test' : 'CoVoST2-EN-TA',
'covost2_id_en_test' : 'CoVoST2-ID-EN',
'covost2_zh_en_test' : 'CoVoST2-ZH-EN',
'covost2_ta_en_test' : 'CoVoST2-TA-EN',
'cn_college_listen_mcq_test' : 'CN-College-Listen-MCQ',
'dream_tts_mcq_test' : 'DREAM-TTS-MCQ',
'slue_p2_sqa5_test' : 'SLUE-P2-SQA5',
'public_sg_speech_qa_test' : 'Public-SG-Speech-QA',
'spoken_squad_test' : 'Spoken-SQuAD',
'openhermes_audio_test' : 'OpenHermes-Audio',
'alpaca_audio_test' : 'ALPACA-Audio',
'wavcaps_test' : 'WavCaps',
'audiocaps_test' : 'AudioCaps',
'clotho_aqa_test' : 'Clotho-AQA',
'wavcaps_qa_test' : 'WavCaps-QA',
'audiocaps_qa_test' : 'AudioCaps-QA',
'voxceleb_accent_test' : 'VoxCeleb-Accent',
'voxceleb_gender_test' : 'VoxCeleb-Gender',
'iemocap_gender_test' : 'IEMOCAP-Gender',
'iemocap_emotion_test' : 'IEMOCAP-Emotion',
'meld_sentiment_test' : 'MELD-Sentiment',
'meld_emotion_test' : 'MELD-Emotion',
'imda_part1_asr_test' : 'IMDA-Part1-ASR',
'imda_part2_asr_test' : 'IMDA-Part2-ASR',
'imda_part3_30s_asr_test' : 'IMDA-Part3-30s-ASR',
'imda_part4_30s_asr_test' : 'IMDA-Part4-30s-ASR',
'imda_part5_30s_asr_test' : 'IMDA-Part5-30s-ASR',
'imda_part6_30s_asr_test' : 'IMDA-Part6-30s-ASR',
'muchomusic_test' : 'MuChoMusic',
'imda_part3_30s_sqa_human_test': 'MNSC-PART3-SQA',
'imda_part4_30s_sqa_human_test': 'MNSC-PART4-SQA',
'imda_part5_30s_sqa_human_test': 'MNSC-PART5-SQA',
'imda_part6_30s_sqa_human_test': 'MNSC-PART6-SQA',
}
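# --- Illustrative usage (an addition, not part of the original module): the
# mapping above is presumably applied to swap raw dataset identifiers for
# display names in a results table. A minimal sketch, assuming results arrive
# as a pandas DataFrame; the helper name and toy data are hypothetical.
def _example_rename_columns():
    import pandas as pd
    results = pd.DataFrame({'librispeech_test_clean': [3.1], 'wavcaps_test': [41.2]})
    # DataFrame.rename leaves columns absent from the mapping untouched.
    return results.rename(columns=dataname_column_rename_in_table)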
asr_datasets = {
    'LibriSpeech-Test-Clean' : 'A clean, high-quality test set of the LibriSpeech dataset, used for ASR testing.',
    'LibriSpeech-Test-Other' : 'A more challenging, noisier test set of the LibriSpeech dataset for ASR testing.',
    'Common-Voice-15-En-Test': 'Test set from the Common Voice project, a crowd-sourced, multilingual speech dataset.',
    'Peoples-Speech-Test'    : 'A large-scale, open-source speech recognition dataset with diverse accents and domains.',
    'GigaSpeech-Test'        : 'A large-scale ASR dataset with diverse audio sources such as podcasts and interviews.',
    'Earnings21-Test'        : 'ASR test dataset focused on earnings calls from 2021, with professional speech and financial jargon.',
    'Earnings22-Test'        : 'Similar to Earnings21, but covering earnings calls from 2022.',
    'Tedlium3-Test'          : 'A test set derived from TED talks, covering diverse speakers and topics.',
    'Tedlium3-Long-form-Test': 'A long-form version of the TED-LIUM dataset, containing extended audio samples. This poses challenges to existing fusion methods in handling long audio; however, it provides a benchmark for future development.',
}
singlish_asr_datasets = {
'IMDA-Part1-ASR-Test' : 'Speech recognition test data from the IMDA NSC project, Part 1.',
'IMDA-Part2-ASR-Test' : 'Speech recognition test data from the IMDA NSC project, Part 2.',
'IMDA-Part3-30s-ASR-Test': 'Speech recognition test data from the IMDA NSC project, Part 3.',
'IMDA-Part4-30s-ASR-Test': 'Speech recognition test data from the IMDA NSC project, Part 4.',
'IMDA-Part5-30s-ASR-Test': 'Speech recognition test data from the IMDA NSC project, Part 5.',
'IMDA-Part6-30s-ASR-Test': 'Speech recognition test data from the IMDA NSC project, Part 6.'
}
sqa_datasets = {
    'CN-College-Listen-MCQ-Test': 'Chinese College English Listening Test, with multiple-choice questions.',
    'DREAM-TTS-MCQ-Test'        : 'DREAM dataset for spoken question answering, derived from textual data and synthesized speech.',
    'SLUE-P2-SQA5-Test'         : 'Spoken Language Understanding Evaluation (SLUE) dataset, Phase 2, focused on QA tasks.',
    'Public-SG-Speech-QA-Test'  : 'Public dataset for speech-based question answering, gathered from Singapore.',
    'Spoken-Squad-Test'         : 'Spoken SQuAD dataset, based on the textual SQuAD dataset, converted into audio.'
}
sqa_singlish_datasets = {
    'MNSC-PART3-SQA': 'Multitask National Speech Corpus (MNSC) dataset, question-answering task, Part 3.',
    'MNSC-PART4-SQA': 'Multitask National Speech Corpus (MNSC) dataset, question-answering task, Part 4.',
    'MNSC-PART5-SQA': 'Multitask National Speech Corpus (MNSC) dataset, question-answering task, Part 5.',
    'MNSC-PART6-SQA': 'Multitask National Speech Corpus (MNSC) dataset, question-answering task, Part 6.',
}
si_datasets = {
'OpenHermes-Audio-Test': 'Test set for spoken instructions. Synthesized from the OpenHermes dataset.',
'ALPACA-Audio-Test' : 'Spoken version of the ALPACA dataset, used for evaluating instruction following in audio.'
}
ac_datasets = {
'WavCaps-Test' : 'WavCaps is a dataset for testing audio captioning, where models generate textual descriptions of audio clips.',
'AudioCaps-Test': 'AudioCaps dataset, used for generating captions from general audio events.'
}
asqa_datasets = {
'Clotho-AQA-Test' : 'Clotho dataset adapted for audio-based question answering, containing audio clips and questions.',
'WavCaps-QA-Test' : 'Question-answering test dataset derived from WavCaps, focusing on audio content.',
'AudioCaps-QA-Test': 'AudioCaps adapted for question-answering tasks, using audio events as input for Q&A.'
}
er_datasets = {
'IEMOCAP-Emotion-Test': 'Emotion recognition test data from the IEMOCAP dataset, focusing on identifying emotions in speech.',
'MELD-Sentiment-Test' : 'Sentiment recognition from speech using the MELD dataset, classifying positive, negative, or neutral sentiments.',
'MELD-Emotion-Test' : 'Emotion classification in speech using MELD, detecting specific emotions like happiness, anger, etc.'
}
ar_datasets = {
'VoxCeleb-Accent-Test': 'Test dataset for accent recognition, based on VoxCeleb, a large speaker identification dataset.'
}
gr_datasets = {
'VoxCeleb-Gender-Test': 'Test dataset for gender classification, also derived from VoxCeleb.',
'IEMOCAP-Gender-Test' : 'Gender classification based on the IEMOCAP dataset.'
}
spt_datasets = {
'CoVoST2-EN-ID-test': 'CoVoST 2 dataset for speech translation from English to Indonesian.',
'CoVoST2-EN-ZH-test': 'CoVoST 2 dataset for speech translation from English to Chinese.',
'CoVoST2-EN-TA-test': 'CoVoST 2 dataset for speech translation from English to Tamil.',
'CoVoST2-ID-EN-test': 'CoVoST 2 dataset for speech translation from Indonesian to English.',
'CoVoST2-ZH-EN-test': 'CoVoST 2 dataset for speech translation from Chinese to English.',
'CoVoST2-TA-EN-test': 'CoVoST 2 dataset for speech translation from Tamil to English.'
}
cnasr_datasets = {
'Aishell-ASR-ZH-Test': 'ASR test dataset for Mandarin Chinese, based on the Aishell dataset.'
}
MUSIC_MCQ_DATASETS = {
    'MuChoMusic-Test': 'Test dataset for music understanding, from the paper "MuChoMusic: Evaluating Music Understanding in Multimodal Audio-Language Models".'
}
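# --- Illustrative sketch (an addition, not from the original file): the
# per-task description dicts above can be merged into a single lookup keyed by
# display name, e.g. for rendering tooltips in a leaderboard UI. The helper
# name and fallback message are hypothetical.
def _example_dataset_description(name):
    all_descriptions = {
        **asr_datasets, **singlish_asr_datasets, **sqa_datasets,
        **sqa_singlish_datasets, **si_datasets, **ac_datasets, **asqa_datasets,
        **er_datasets, **ar_datasets, **gr_datasets, **spt_datasets,
        **cnasr_datasets, **MUSIC_MCQ_DATASETS,
    }
    # Fall back to a neutral message for unknown dataset names.
    return all_descriptions.get(name, 'No description available.')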
metrics = {
'wer' : 'Word Error Rate (WER), a common metric for ASR evaluation. (The lower, the better)',
    'llama3_70b_judge_binary': 'Binary evaluation using the LLAMA-3-70B model, for tasks requiring a binary outcome. (Scores of 0-1, rescaled to 0-100.)',
    'llama3_70b_judge'       : 'General evaluation using the LLAMA-3-70B model, typically scoring subjective judgments. (Scores of 0-5, rescaled to 0-100.)',
'meteor' : 'METEOR, a metric used for evaluating text generation, often used in translation or summarization tasks. (Sensitive to output length)',
'bleu' : 'BLEU (Bilingual Evaluation Understudy), another text generation evaluation metric commonly used in machine translation. (Sensitive to output length)',
}
metrics_info = {
'wer' : 'Word Error Rate (WER) - The Lower, the better.',
    'llama3_70b_judge_binary': 'Model-as-a-Judge Performance. Using LLAMA-3-70B. Scale from 0-100. The higher, the better.',
    'llama3_70b_judge'       : 'Model-as-a-Judge Performance. Using LLAMA-3-70B. Scale from 0-100. The higher, the better.',
'meteor' : 'METEOR Score. The higher, the better.',
'bleu' : 'BLEU Score. The higher, the better.',
}
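# --- Illustrative sketch (hypothetical helper, not from the original file):
# pairing a metric's long description from `metrics` with its short display
# note from `metrics_info`, e.g. for a leaderboard column tooltip.
def _example_metric_summary(metric_name):
    description = metrics.get(metric_name, 'Unknown metric.')
    note = metrics_info.get(metric_name, '')
    # Append the short note only when one is defined for this metric.
    return f'{metric_name}: {description} [{note}]' if note else f'{metric_name}: {description}'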