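"""Streamlit page definitions for the AudioBench leaderboard.

dashboard() renders the landing page; every other function renders one
evaluation task. The dataset/metric description dicts (asr_datsets,
metrics, ...) and the helpers draw() and sum_table_mulit_metrix() are
assumed to come from the app.* modules imported below; their misspelled
names are kept as-is to match those modules.
"""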
import streamlit as st
from app.draw_diagram import *
from app.content import *
from app.summarization import *
def dataset_contents(dataset, metrics):
    """Render short descriptions of the selected dataset and metric."""

    custom_css = """
                <style>
                .my-dataset-info {
                    /* background-color: #F9EBEA; */
                    /* padding: 10px; */
                    color: #050505;
                    font-style: normal;
                    font-size: 8px;
                    height: auto;
                }
                </style>
                """
    st.markdown(custom_css, unsafe_allow_html=True)

    st.markdown(f"""<div class="my-dataset-info">
                    <p><b>About this dataset</b>: {dataset}</p>
                    </div>""", unsafe_allow_html=True)
    st.markdown(f"""<div class="my-dataset-info">
                    <p><b>About this metric</b>: {metrics}</p>
                    </div>""", unsafe_allow_html=True)
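# Example usage (hypothetical keys; assumes app.content exposes these dicts):
#   dataset_contents(asr_datsets['LibriSpeech-Test-Clean'], metrics['wer'])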
def dashboard():
    """Render the landing page: badges, changelog, overview, and citation."""

    with st.container():
        st.title("AudioBench")

        st.markdown("""
            [gh]: https://github.com/AudioLLMs/AudioBench
            [![GitHub Repo stars](https://img.shields.io/github/stars/AudioLLMs/AudioBench?style=social)][gh]
            [![GitHub watchers](https://img.shields.io/github/watchers/AudioLLMs/AudioBench?style=social)][gh]
            """)

        st.markdown("""
            ### Changelog

            - **Dec 2024**:
                - Added the MuChoMusic dataset for Music Understanding - MCQ Questions (paper: https://arxiv.org/abs/2408.01337).
                - Added a Singlish ASR task! The datasets are available on [HF](https://huggingface.co/datasets/MERaLiON/MNSC).
                - Updated the layout and added support for comparing models of similar sizes.
                - Reorganized the layout for a better user experience.
                - Added a performance summary for each task.
            - **Aug 2024**:
                - Initial leaderboard is now online.
            """)

        st.divider()

        st.markdown("""
            #### What is [AudioBench](https://arxiv.org/abs/2406.16020)?

            - AudioBench is a comprehensive evaluation benchmark designed for general instruction-following audio large language models.
            - AudioBench is an evaluation benchmark that we continually update and maintain.

            Below are the initial 26 datasets included in AudioBench. It has since grown to over 40 datasets, and more will be added in the future.
            """)

    with st.container():
        left_co, center_co, right_co = st.columns([1, 0.5, 0.5])
        with left_co:
            st.image("./style/audio_overview.png",
                     caption="Overview of the datasets in AudioBench.",
                     )

        st.markdown('''
                    ''')

        st.markdown("###### :dart: Our Benchmark includes:")
        cols = st.columns(10)
        cols[0].metric(label="Tasks", value=">8")
        cols[1].metric(label="Datasets", value=">40")
        cols[2].metric(label="Evaluated Models", value=">5")

    st.divider()

    with st.container():
        left_co, center_co, right_co = st.columns([1, 0.5, 0.5])

        with left_co:
            st.markdown("""
                ##### Citations :round_pushpin:

                ```
                @article{wang2024audiobench,
                    title={AudioBench: A Universal Benchmark for Audio Large Language Models},
                    author={Wang, Bin and Zou, Xunlong and Lin, Geyu and Sun, Shuo and Liu, Zhuohan and Zhang, Wenyu and Liu, Zhengyuan and Aw, AiTi and Chen, Nancy F},
                    journal={arXiv preprint arXiv:2406.16020},
                    year={2024}
                }
                ```
                """)
# ---------------------------------------------------------------------------
# Task pages. Each page follows the same template: pick a dataset from a
# selectbox; 'Overall' shows the cross-dataset summary table, any other
# choice shows the dataset/metric descriptions and the per-model results.
# draw()'s first argument ('su', 'asu', 'vu') is assumed to select the chart
# group used by app.draw_diagram.
# ---------------------------------------------------------------------------

def asr():
    st.title("Task: Automatic Speech Recognition")

    summary = ['Overall']
    dataset_lists = [
        'LibriSpeech-Test-Clean',
        'LibriSpeech-Test-Other',
        'Common-Voice-15-En-Test',
        'Peoples-Speech-Test',
        'GigaSpeech-Test',
        'Earnings21-Test',
        'Earnings22-Test',
        'Tedlium3-Test',
        'Tedlium3-Long-form-Test',
    ]

    filters_levelone = summary + dataset_lists

    left, center, _, middle, right = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])

    with left:
        filter_1 = st.selectbox('Dataset', filters_levelone)

    if filter_1:
        if filter_1 in summary:
            sum_table_mulit_metrix('ASR', ['wer'])
        else:
            dataset_contents(asr_datsets[filter_1], metrics['wer'])
            # cus_sort is assumed to enable custom dataset ordering in draw().
            draw('su', 'ASR', filter_1, 'wer', cus_sort=True)
def singlish_asr():
    st.title("Task: Automatic Speech Recognition - Singlish")

    summary = ['Overall']
    dataset_lists = [
        'IMDA-Part1-ASR-Test',
        'IMDA-Part2-ASR-Test',
        'IMDA-Part3-30s-ASR-Test',
        'IMDA-Part4-30s-ASR-Test',
        'IMDA-Part5-30s-ASR-Test',
        'IMDA-Part6-30s-ASR-Test',
    ]

    filters_levelone = summary + dataset_lists

    left, center, _, middle, right = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])

    with left:
        filter_1 = st.selectbox('Dataset', filters_levelone)

    if filter_1:
        if filter_1 in summary:
            sum_table_mulit_metrix('singlish_asr', ['wer'])
        else:
            dataset_contents(singlish_asr_datasets[filter_1], metrics['wer'])
            draw('su', 'singlish_asr', filter_1, 'wer')
def cnasr():
    st.title("Task: Automatic Speech Recognition - Mandarin")

    summary = ['Overall']
    dataset_lists = [
        'Aishell-ASR-ZH-Test',
    ]

    filters_levelone = summary + dataset_lists

    left, center, _, middle, right = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])

    with left:
        filter_1 = st.selectbox('Dataset', filters_levelone)

    if filter_1:
        if filter_1 in summary:
            sum_table_mulit_metrix('CNASR', ['wer'])
        else:
            dataset_contents(cnasr_datasets[filter_1], metrics['wer'])
            draw('su', 'CNASR', filter_1, 'wer')
def sqa():
    st.title("Task: Speech Question Answering")

    summary = ['Overall']
    # MCQ-style datasets are scored with the binary judge; the rest use the
    # open-ended judge.
    binary = ['CN-College-Listen-MCQ-Test', 'DREAM-TTS-MCQ-Test']
    rest = ['SLUE-P2-SQA5-Test',
            'Public-SG-Speech-QA-Test',
            'Spoken-Squad-Test']

    filters_levelone = summary + binary + rest

    left, center, _, middle, right = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])

    with left:
        filter_1 = st.selectbox('Dataset', filters_levelone)

    if filter_1:
        if filter_1 in summary:
            sum_table_mulit_metrix('SQA', ['llama3_70b_judge_binary', 'llama3_70b_judge'])
        elif filter_1 in binary:
            dataset_contents(sqa_datasets[filter_1], metrics['llama3_70b_judge_binary'])
            draw('su', 'SQA', filter_1, 'llama3_70b_judge_binary')
        else:
            dataset_contents(sqa_datasets[filter_1], metrics['llama3_70b_judge'])
            draw('su', 'SQA', filter_1, 'llama3_70b_judge')
def si():
    st.title("Task: Speech Instruction")

    summary = ['Overall']
    dataset_lists = ['OpenHermes-Audio-Test',
                     'ALPACA-Audio-Test']

    filters_levelone = summary + dataset_lists

    left, center, _, middle, right = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])

    with left:
        filter_1 = st.selectbox('Dataset', filters_levelone)

    if filter_1:
        if filter_1 in summary:
            sum_table_mulit_metrix('SI', ['llama3_70b_judge'])
        else:
            dataset_contents(si_datasets[filter_1], metrics['llama3_70b_judge'])
            draw('su', 'SI', filter_1, 'llama3_70b_judge')
def ac():
    st.title("Task: Audio Captioning")

    filters_levelone = ['WavCaps-Test',
                        'AudioCaps-Test']
    filters_leveltwo = ['Llama3-70b-judge', 'Meteor']

    left, center, _, middle, right = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])

    with left:
        filter_1 = st.selectbox('Dataset', filters_levelone)
    with middle:
        metric = st.selectbox('Metric', filters_leveltwo)

    if filter_1 and metric:
        # Display names map to metric keys: 'Llama3-70b-judge' -> 'llama3_70b_judge'.
        dataset_contents(ac_datasets[filter_1], metrics[metric.lower().replace('-', '_')])
        draw('asu', 'AC', filter_1, metric.lower().replace('-', '_'))
def asqa():
    st.title("Task: Audio Scene Question Answering")

    summary = ['Overall']
    dataset_lists = ['Clotho-AQA-Test',
                     'WavCaps-QA-Test',
                     'AudioCaps-QA-Test']

    filters_levelone = summary + dataset_lists

    left, center, _, middle, right = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])

    with left:
        filter_1 = st.selectbox('Dataset', filters_levelone)

    if filter_1:
        if filter_1 in summary:
            sum_table_mulit_metrix('AQA', ['llama3_70b_judge'])
        else:
            dataset_contents(asqa_datasets[filter_1], metrics['llama3_70b_judge'])
            draw('asu', 'AQA', filter_1, 'llama3_70b_judge')
def er():
    st.title("Task: Emotion Recognition")

    summary = ['Overall']
    dataset_lists = ['IEMOCAP-Emotion-Test',
                     'MELD-Sentiment-Test',
                     'MELD-Emotion-Test']

    filters_levelone = summary + dataset_lists

    left, center, _, middle, right = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])

    with left:
        filter_1 = st.selectbox('Dataset', filters_levelone)

    if filter_1:
        if filter_1 in summary:
            sum_table_mulit_metrix('ER', ['llama3_70b_judge_binary'])
        else:
            dataset_contents(er_datasets[filter_1], metrics['llama3_70b_judge_binary'])
            draw('vu', 'ER', filter_1, 'llama3_70b_judge_binary')
def ar():
    st.title("Task: Accent Recognition")

    summary = ['Overall']
    dataset_lists = ['VoxCeleb-Accent-Test']

    filters_levelone = summary + dataset_lists

    left, center, _, middle, right = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])

    with left:
        filter_1 = st.selectbox('Dataset', filters_levelone)

    if filter_1:
        if filter_1 in summary:
            sum_table_mulit_metrix('AR', ['llama3_70b_judge'])
        else:
            dataset_contents(ar_datsets[filter_1], metrics['llama3_70b_judge'])
            draw('vu', 'AR', filter_1, 'llama3_70b_judge')
def gr():
    st.title("Task: Gender Recognition")

    summary = ['Overall']
    dataset_lists = ['VoxCeleb-Gender-Test',
                     'IEMOCAP-Gender-Test']

    filters_levelone = summary + dataset_lists

    left, center, _, middle, right = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])

    with left:
        filter_1 = st.selectbox('Dataset', filters_levelone)

    if filter_1:
        if filter_1 in summary:
            sum_table_mulit_metrix('GR', ['llama3_70b_judge_binary'])
        else:
            dataset_contents(gr_datasets[filter_1], metrics['llama3_70b_judge_binary'])
            draw('vu', 'GR', filter_1, 'llama3_70b_judge_binary')
def spt():
    st.title("Task: Speech Translation")

    summary = ['Overall']
    dataset_lists = [
        'Covost2-EN-ID-test',
        'Covost2-EN-ZH-test',
        'Covost2-EN-TA-test',
        'Covost2-ID-EN-test',
        'Covost2-ZH-EN-test',
        'Covost2-TA-EN-test']

    filters_levelone = summary + dataset_lists

    left, center, _, middle, right = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])

    with left:
        filter_1 = st.selectbox('Dataset', filters_levelone)

    if filter_1:
        if filter_1 in summary:
            sum_table_mulit_metrix('st', ['bleu'])
        else:
            dataset_contents(spt_datasets[filter_1], metrics['bleu'])
            draw('su', 'ST', filter_1, 'bleu')
def music_mcq():
    st.title("Task: Music Understanding - MCQ Questions")

    summary = ['Overall']
    dataset_lists = ['MuChoMusic-Test']

    filters_levelone = summary + dataset_lists

    left, center, _, middle, right = st.columns([0.2, 0.2, 0.2, 0.2, 0.2])

    with left:
        filter_1 = st.selectbox('Dataset', filters_levelone)

    if filter_1:
        if filter_1 in summary:
            sum_table_mulit_metrix('music_mcq', ['llama3_70b_judge_binary'])
        else:
            dataset_contents(MUSIC_MCQ_DATASETS[filter_1], metrics['llama3_70b_judge_binary'])
            draw('vu', 'music_mcq', filter_1, 'llama3_70b_judge_binary')
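
# A minimal, hypothetical sketch of how these pages could be wired into an
# entry point; the real app may route differently (e.g. a navigation menu
# defined in another module):
#
#   PAGES = {'Dashboard': dashboard, 'ASR': asr, 'Speech Translation': spt}
#   choice = st.sidebar.selectbox('Navigation', list(PAGES))
#   PAGES[choice]()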