# Importing necessary libraries
import re
import time
from collections import Counter
from functools import cache
from io import StringIO
from itertools import combinations

import pandas as pd
import plotly.graph_objs as go
import requests
import streamlit as st
from huggingface_hub import HfApi
from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError

from yall import create_yall
# Function to get model info from the Hugging Face API using caching
@cache
def cached_model_info(api, model):
    try:
        return api.model_info(repo_id=str(model))
    except (RepositoryNotFoundError, RevisionNotFoundError):
        return None
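# Editorial note (assumption about intent, not part of the original app): functools.cache
# memoizes on the (api, model) argument pair, so repeated lookups of the same repo id
# within one process are served from memory instead of hitting the Hugging Face API, e.g.:
#
#   api = HfApi()
#   cached_model_info(api, "mistralai/Mistral-7B-v0.1")  # network call
#   cached_model_info(api, "mistralai/Mistral-7B-v0.1")  # returned from the cache
#
# The repo id above is purely illustrative, not one read from the leaderboard.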
# Function to convert a markdown table to a DataFrame and extract Hugging Face URLs
def convert_markdown_table_to_dataframe(md_content):
    # Strip the leading and trailing pipes so pandas can parse the table body
    cleaned_content = re.sub(r'\|\s*$', '', re.sub(r'^\|\s*', '', md_content, flags=re.MULTILINE), flags=re.MULTILINE)
    df = pd.read_csv(StringIO(cleaned_content), sep=r"\|", engine='python')
    # Drop the markdown separator row (|---|---|...)
    df = df.drop(0, axis=0)
    df.columns = df.columns.str.strip()
    # Extract the model name and its URL from the markdown links in the Model column
    model_link_pattern = r'\[(.*?)\]\((.*?)\)\s*\[.*?\]\(.*?\)'
    df['URL'] = df['Model'].apply(lambda x: re.search(model_link_pattern, x).group(2) if re.search(model_link_pattern, x) else None)
    df['Model'] = df['Model'].apply(lambda x: re.sub(model_link_pattern, r'\1', x))
    return df
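# Illustrative sketch (hypothetical row, not taken from the leaderboard): the Model cell
# of the gist table is expected to hold two markdown links, the model page followed by
# the evaluation gist, roughly like:
#
#   | [foo/bar-7B](https://huggingface.co/foo/bar-7B) [gist](https://gist.github.com/...) | 52.1 | ... |
#
# convert_markdown_table_to_dataframe(content) would then yield 'Model' == 'foo/bar-7B'
# and 'URL' == 'https://huggingface.co/foo/bar-7B' for that row.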
# Function to get and update model info in the DataFrame
def get_and_update_model_info(df):
    api = HfApi()
    for index, row in df.iterrows():
        model_info = cached_model_info(api, row['Model'].strip())
        if model_info:
            df.loc[index, 'Likes'] = model_info.likes
            df.loc[index, 'Tags'] = ', '.join(model_info.tags)
        else:
            df.loc[index, 'Likes'] = -1
            df.loc[index, 'Tags'] = ''
    return df
# Define the score columns
score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']
# Function to calculate the highest combined score for a given column
def calculate_highest_combined_score(data, column):
    # Drop rows with missing values so every score stays paired with its model
    valid = data[['Model', column]].dropna()
    scores = valid[column].tolist()
    models = valid['Model'].tolist()
    top_combinations = {2: [], 3: [], 4: [], 5: [], 6: []}
    for r in range(2, 7):
        for combination in combinations(zip(scores, models), r):
            combined_score = sum(score for score, _ in combination)
            top_combinations[r].append((combined_score, tuple(model for _, model in combination)))
        top_combinations[r] = sorted(top_combinations[r], key=lambda x: x[0], reverse=True)[:3]
    return column, top_combinations
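# Illustrative sketch (hypothetical numbers, not from the leaderboard): with scores
# {A: 50, B: 40, C: 30} and r = 2, the candidate sums are A+B = 90, A+C = 80 and
# B+C = 70, so the best pair is (90, ('A', 'B')). The number of candidates grows as
# C(n, r), so this brute-force search is only practical for small leaderboards.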
# Function to display the results of the highest combined scores
def display_highest_combined_scores(data):
    for column in score_columns:
        if column in data:
            _, top_combinations = calculate_highest_combined_score(data, column)
            st.subheader(f"Top Combinations for {column}")
            # Use a local name that does not shadow itertools.combinations
            for r, combos in top_combinations.items():
                st.write(f"**Number of Models: {r}**")
                for score, combination in combos:
                    st.write(f"Score: {score}, Models: {', '.join(combination)}")
# Function to fetch likes and tags for each model and add them to the DataFrame
@st.cache_data
def get_model_info(df):
    api = HfApi()
    # Initialize new columns for likes and tags
    df['Likes'] = None
    df['Tags'] = None
    # Iterate through DataFrame rows
    for index, row in df.iterrows():
        model = row['Model'].strip()
        try:
            model_info = api.model_info(repo_id=str(model))
            df.loc[index, 'Likes'] = model_info.likes
            df.loc[index, 'Tags'] = ', '.join(model_info.tags)
        except (RepositoryNotFoundError, RevisionNotFoundError):
            df.loc[index, 'Likes'] = -1
            df.loc[index, 'Tags'] = ''
    return df
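# Editorial note (assumption about intent): st.cache_data keys the cache on the contents
# of the input DataFrame, so the Hugging Face API is only queried again when the parsed
# leaderboard table changes, while cached_model_info above memoizes individual
# (api, model) lookups within a single process.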
# Function to create a bar chart for a given category
def create_bar_chart(df, category):
    """Create and display a horizontal bar chart for a given category."""
    st.write(f"### {category} Scores")
    # Sort the DataFrame based on the category score
    sorted_df = df[['Model', category]].sort_values(by=category, ascending=True)
    # Create the bar chart with a color gradient based on the score
    fig = go.Figure(go.Bar(
        x=sorted_df[category],
        y=sorted_df['Model'],
        orientation='h',
        marker=dict(color=sorted_df[category], colorscale='Spectral')  # Any Plotly color scale can be used here
    ))
    # Update layout for better readability
    fig.update_layout(
        margin=dict(l=20, r=20, t=20, b=20)
    )
    # Adjust the height of the chart based on the number of rows in the DataFrame
    st.plotly_chart(fig, use_container_width=True, height=len(df) * 35)
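# Illustrative usage (not executed here; main() below drives the real charts):
#
#   create_bar_chart(df, 'Average')  # one full-width chart for the headline score
#
# Because the height scales with the number of rows, long leaderboards stay readable.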
# Main function to run the Streamlit app
def main():
    st.title("🏆 YALL - Yet Another LLM Leaderboard")
    st.markdown("Leaderboard made with 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using the [Nous](https://huggingface.co/NousResearch) benchmark suite.")
    # Create tabs for the leaderboard and the about section
    content = create_yall()
    tab1, tab2 = st.tabs(["🏆 Leaderboard", "📝 About"])
    # Leaderboard tab
    with tab1:
        if content:
            try:
                score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']
                # Build the full dataframe from the markdown leaderboard
                full_df = convert_markdown_table_to_dataframe(content)
                # Convert the score columns to numeric values
                for col in score_columns:
                    full_df[col] = pd.to_numeric(full_df[col].str.strip(), errors='coerce')
                full_df = get_model_info(full_df)
                full_df['Tags'] = full_df['Tags'].fillna('')
                df = pd.DataFrame(columns=full_df.columns)
                # Toggles for filtering by tags
                show_phi = st.checkbox("Phi (2.8B)", value=True)
                show_mistral = st.checkbox("Mistral (7B)", value=True)
                show_other = st.checkbox("Other", value=True)
                # Create a DataFrame based on the selected filters
                dfs_to_concat = []
                if show_phi:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('phi,|phi-msft,')])
                if show_mistral:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('mistral,')])
                if show_other:
                    other_df = full_df[~full_df['Tags'].str.lower().str.contains('phi,|phi-msft,|mistral,')]
                    dfs_to_concat.append(other_df)
                # Concatenate the DataFrames
                if dfs_to_concat:
                    df = pd.concat(dfs_to_concat, ignore_index=True)
                # Add a search bar
                search_query = st.text_input("Search models", "")
                # Filter the DataFrame based on the search query
                if search_query:
                    df = df[df['Model'].str.contains(search_query, case=False)]
                # Display the filtered DataFrame or the entire leaderboard
                st.dataframe(
                    df[['Model'] + score_columns + ['Likes', 'URL']],
                    use_container_width=True,
                    column_config={
                        "Likes": st.column_config.NumberColumn(
                            "Likes",
                            help="Number of likes on Hugging Face",
                            format="%d ❤️",
                        ),
                        "URL": st.column_config.LinkColumn("URL"),
                    },
                    hide_index=True,
                    height=len(df) * 37,
                )
                # Let the user pick a subset of models to compare side by side
                selected_models = st.multiselect('Select models to compare', df['Model'].unique())
                comparison_df = df[df['Model'].isin(selected_models)]
                st.dataframe(comparison_df)
                # Add a button to export data to CSV
                if st.button("Export to CSV"):
                    # Export the DataFrame to CSV
                    csv_data = df.to_csv(index=False)
                    # Create a link to download the CSV file
                    st.download_button(
                        label="Download CSV",
                        data=csv_data,
                        file_name="leaderboard.csv",
                        key="download-csv",
                        help="Click to download the CSV file",
                    )
                # Full-width plot for the first category
                create_bar_chart(df, score_columns[0])
                # Next two plots in two columns
                col1, col2 = st.columns(2)
                with col1:
                    create_bar_chart(df, score_columns[1])
                with col2:
                    create_bar_chart(df, score_columns[2])
                # Last two plots in two columns
                col3, col4 = st.columns(2)
                with col3:
                    create_bar_chart(df, score_columns[3])
                with col4:
                    create_bar_chart(df, score_columns[4])
            except Exception as e:
                st.error("An error occurred while processing the markdown table.")
                st.error(str(e))
        else:
            st.error("Failed to download the content from the URL provided.")
    # About tab
    with tab2:
        st.markdown('''
### Nous benchmark suite
Popularized by [Teknium](https://huggingface.co/teknium) and [NousResearch](https://huggingface.co/NousResearch), this benchmark suite aggregates four benchmarks:
* [**AGIEval**](https://arxiv.org/abs/2304.06364) (0-shot): `agieval_aqua_rat,agieval_logiqa_en,agieval_lsat_ar,agieval_lsat_lr,agieval_lsat_rc,agieval_sat_en,agieval_sat_en_without_passage,agieval_sat_math`
* **GPT4ALL** (0-shot): `hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa`
* [**TruthfulQA**](https://arxiv.org/abs/2109.07958) (0-shot): `truthfulqa_mc`
* [**Bigbench**](https://arxiv.org/abs/2206.04615) (0-shot): `bigbench_causal_judgement,bigbench_date_understanding,bigbench_disambiguation_qa,bigbench_geometric_shapes,bigbench_logical_deduction_five_objects,bigbench_logical_deduction_seven_objects,bigbench_logical_deduction_three_objects,bigbench_movie_recommendation,bigbench_navigate,bigbench_reasoning_about_colored_objects,bigbench_ruin_names,bigbench_salient_translation_error_detection,bigbench_snarks,bigbench_sports_understanding,bigbench_temporal_sequences,bigbench_tracking_shuffled_objects_five_objects,bigbench_tracking_shuffled_objects_seven_objects,bigbench_tracking_shuffled_objects_three_objects`
### Reproducibility
You can easily reproduce these results using 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval/tree/master), a Colab notebook that automates the evaluation process (benchmark: `nous`). It uploads the results to GitHub as gists. You can find the entire table with links to the detailed results [here](https://gist.github.com/mlabonne/90294929a2dbcb8877f9696f28105fdf).
### Clone this space
You can create your own leaderboard with your LLM AutoEval results on GitHub Gist. You just need to clone this space and specify two variables:
* Change the `gist_id` in [yall.py](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard/blob/main/yall.py#L126).
* Create a "New Secret" in Settings > Variables and secrets (name: "github", value: [your GitHub token](https://github.com/settings/tokens)).
A special thanks to [gblazex](https://huggingface.co/gblazex) for providing many evaluations.
''')
# Run the main function if this script is run directly
if __name__ == "__main__":
    main()