import re
from functools import cache
from io import StringIO
from itertools import combinations

import pandas as pd
import plotly.graph_objs as go
import streamlit as st
from huggingface_hub import HfApi, ModelCard
from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError

from yall import create_yall
def calculate_pages(df, items_per_page):
    return -(-len(df) // items_per_page)  # Equivalent to math.ceil(len(df) / items_per_page)
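# Example: calculate_pages(df, 50) on a 120-row DataFrame gives -(-120 // 50) = 3 pages.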
# Get model info from the Hugging Face API, caching results to avoid repeated requests
@cache
def cached_model_info(api, model):
    try:
        return api.model_info(repo_id=str(model))
    except (RepositoryNotFoundError, RevisionNotFoundError):
        return None
# Look up every model in the DataFrame and add Likes and Tags columns
def get_model_info(df):
    api = HfApi()
    # Initialize the new columns
    df['Likes'] = None
    df['Tags'] = None
    for index, row in df.iterrows():
        model_info = cached_model_info(api, row['Model'].strip())
        if model_info:
            df.loc[index, 'Likes'] = model_info.likes
            df.loc[index, 'Tags'] = ', '.join(model_info.tags)
        else:
            df.loc[index, 'Likes'] = -1
            df.loc[index, 'Tags'] = ''
    return df
# Convert a markdown table to a DataFrame and extract Hugging Face URLs
def convert_markdown_table_to_dataframe(md_content):
    """
    Converts a markdown table to a pandas DataFrame, handling special characters and links,
    extracts Hugging Face URLs, and adds them to a new column.
    """
    # Remove leading and trailing | characters from each line
    cleaned_content = re.sub(r'\|\s*$', '', re.sub(r'^\|\s*', '', md_content, flags=re.MULTILINE), flags=re.MULTILINE)

    # Create a DataFrame from the cleaned content
    df = pd.read_csv(StringIO(cleaned_content), sep=r'\|', engine='python')

    # Drop the markdown separator row (e.g. ---|---) that follows the header
    df = df.drop(0, axis=0)
    # Strip whitespace from column names
    df.columns = df.columns.str.strip()

    # Extract Hugging Face URLs and add them to a new column
    model_link_pattern = r'\[(.*?)\]\((.*?)\)\s*\[.*?\]\(.*?\)'
    df['URL'] = df['Model'].apply(lambda x: re.search(model_link_pattern, x).group(2) if re.search(model_link_pattern, x) else None)

    # Clean the Model column to keep only the model link text
    df['Model'] = df['Model'].apply(lambda x: re.sub(model_link_pattern, r'\1', x))
    return df
# def calculate_highest_combined_score(data, column):
#     score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']
#     # Ensure the column exists and has numeric data
#     if column not in data.columns or not pd.api.types.is_numeric_dtype(data[column]):
#         return column, {}
#     scores = data[column].dropna().tolist()
#     models = data['Model'].tolist()
#     top_combinations = {r: [] for r in range(2, 5)}
#     for r in range(2, 5):
#         for combination in combinations(zip(scores, models), r):
#             combined_score = sum(score for score, _ in combination)
#             top_combinations[r].append((combined_score, tuple(model for _, model in combination)))
#         top_combinations[r].sort(key=lambda x: x[0], reverse=True)
#         top_combinations[r] = top_combinations[r][:5]
#     return column, top_combinations
#
#
# # Modified function to display the results of the highest combined scores using st.dataframe
# def display_highest_combined_scores(data):
#     score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']
#     with st.spinner('Calculating highest combined scores...'):
#         results = [calculate_highest_combined_score(data, col) for col in score_columns]
#         for column, top_combinations in results:
#             st.subheader(f"Top Combinations for {column}")
#             for r, combinations in top_combinations.items():
#                 # Prepare data for DataFrame
#                 rows = [{'Score': score, 'Models': ', '.join(combination)} for score, combination in combinations]
#                 df = pd.DataFrame(rows)
#
#                 # Display using st.dataframe
#                 st.markdown(f"**Number of Models: {r}**")
#                 st.dataframe(df, height=150)  # Adjust height as necessary
# Create a horizontal bar chart for a given category
def create_bar_chart(df, category):
    """Create and display a bar chart for a given category."""
    st.write(f"### {category} Scores")

    # Sort the DataFrame based on the category score
    sorted_df = df[['Model', category]].sort_values(by=category, ascending=True)

    # Create the bar chart with a color gradient (using the 'Spectral' color scale)
    fig = go.Figure(go.Bar(
        x=sorted_df[category],
        y=sorted_df['Model'],
        orientation='h',
        marker=dict(color=sorted_df[category], colorscale='Spectral')  # Swap in another color scale if preferred
    ))

    # Update layout for readability and scale the height with the number of rows
    fig.update_layout(
        margin=dict(l=20, r=20, t=20, b=20),
        height=len(df) * 35
    )
    st.plotly_chart(fig, use_container_width=True)
# Collect model cards and any embedded merge configurations for the top 20 models
def fetch_merge_configs(df):
    # Sort the DataFrame and keep the top 20 entries by average score
    df_sorted = df.sort_values(by='Average', ascending=False).head(20)
    configurations = []
    matches = []
    # Get model cards for the top 20 entries
    for index, row in df_sorted.iterrows():
        model_name = row['Model'].rstrip()
        try:
            card = ModelCard.load(model_name)
            configurations.append({
                "Model Name": model_name,
                "Scores": row["Average"],
                "AGIEval": row["AGIEval"],
                "GPT4All": row["GPT4All"],
                "TruthfulQA": row["TruthfulQA"],
                "Bigbench": row["Bigbench"],
                "Model Card": str(card)
            })
            # Extract any YAML merge configuration embedded in the model card
            match = re.findall(r'```yaml(.*?)```', str(card), re.DOTALL)
            if match:
                matches.append(match[0])
        except Exception as e:
            print(f"Failed to load model card for {model_name}. Error: {e}")
    csv_data = df.to_csv(index=False)
    return configurations, matches, csv_data
# Main function to run the Streamlit app
def main():
    # Set page configuration and title
    st.set_page_config(page_title="YALL - Yet Another LLM Leaderboard", layout="wide")
    st.title("🏆 YALL - Yet Another LLM Leaderboard")
    st.markdown("Leaderboard made with 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using the [Nous](https://huggingface.co/NousResearch) benchmark suite.")
    # Create tabs for the leaderboard and about sections
    content = create_yall()
    tab1, tab2 = st.tabs(["🏆 Leaderboard", "📝 About"])

    # Leaderboard tab
    with tab1:
        if content:
            try:
                score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']

                # Build the DataFrame from the markdown leaderboard
                full_df = convert_markdown_table_to_dataframe(content)
                # Convert score columns to numeric, coercing unparsable values to NaN
                for col in score_columns:
                    full_df[col] = pd.to_numeric(full_df[col].str.strip(), errors='coerce')
                full_df = get_model_info(full_df)
                full_df['Tags'] = full_df['Tags'].fillna('')
                df = pd.DataFrame(columns=full_df.columns)
                # Toggles for filtering by tags
                show_phi = st.checkbox("Phi (2.8B)", value=True)
                show_mistral = st.checkbox("Mistral (7B)", value=True)
                show_other = st.checkbox("Other", value=True)

                # Create a DataFrame based on the selected filters
                dfs_to_concat = []
                if show_phi:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('phi,|phi-msft,')])
                if show_mistral:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('mistral,')])
                if show_other:
                    other_df = full_df[~full_df['Tags'].str.lower().str.contains('phi,|phi-msft,|mistral,')]
                    dfs_to_concat.append(other_df)

                # Concatenate the DataFrames
                if dfs_to_concat:
                    df = pd.concat(dfs_to_concat, ignore_index=True)
                # Add a search bar
                search_query = st.text_input("Search models", "")
                # Filter the DataFrame based on the search query
                if search_query:
                    df = df[df['Model'].str.contains(search_query, case=False)]

                # Add a selectbox for page selection
                items_per_page = 50
                pages = calculate_pages(df, items_per_page)
                page = st.selectbox("Page", list(range(1, pages + 1)))

                # Sort the DataFrame by the 'Average' column in descending order
                df = df.sort_values(by='Average', ascending=False)

                # Slice the DataFrame based on the selected page
                start = (page - 1) * items_per_page
                end = start + items_per_page
                df = df[start:end]
                # Display the filtered DataFrame or the entire leaderboard
                st.dataframe(
                    df[['Model'] + score_columns + ['Likes', 'URL']],
                    use_container_width=True,
                    column_config={
                        "Likes": st.column_config.NumberColumn(
                            "Likes",
                            help="Number of likes on Hugging Face",
                            format="%d ❤️",
                        ),
                        "URL": st.column_config.LinkColumn("URL"),
                    },
                    hide_index=True,
                    height=len(df) * 37,
                )

                selected_models = st.multiselect('Select models to compare', df['Model'].unique())
                comparison_df = df[df['Model'].isin(selected_models)]
                st.dataframe(comparison_df)
                # Add a button to export data to CSV
                if st.button("Export to CSV"):
                    # Export the DataFrame to CSV
                    csv_data = df.to_csv(index=False)

                    # Create a link to download the CSV file
                    st.download_button(
                        label="Download CSV",
                        data=csv_data,
                        file_name="leaderboard.csv",
                        key="download-csv",
                        help="Click to download the CSV file",
                    )
if st.button("Fetch Merge-Configs"): | |
# Call the function with the current DataFrame | |
configurations, matches, csv_data = fetch_merge_configs(full_df) # Assuming full_df is your DataFrame | |
# You can then display the configurations or matches as needed, or write them to a file | |
# For example, displaying the configurations: | |
for config in configurations: | |
st.text(f"Model Name: {config['Model Name']}\nScores: {config['Scores']}\nAGIEval: {config['AGIEval']}\nGPT4All: {config['GPT4All']}\nTruthfulQA: {config['TruthfulQA']}\nBigbench: {config['Bigbench']}\nModel Card: {config['Model Card']}\n\n") | |
# Convert the list of dictionaries to a DataFrame | |
configurations_df = pd.DataFrame(configurations) | |
# Convert the DataFrame to a CSV string | |
configurations_csv = configurations_df.to_csv(index=False) | |
st.download_button( | |
label="Download Configurations", | |
data=configurations_csv, | |
file_name="configurations.csv", | |
key="download-csv", | |
help="Click to download the CSV file", | |
) | |
                # Full-width plot for the first category
                create_bar_chart(df, score_columns[0])

                # Next two plots in two columns
                col1, col2 = st.columns(2)
                with col1:
                    create_bar_chart(df, score_columns[1])
                with col2:
                    create_bar_chart(df, score_columns[2])

                # Last two plots in two columns
                col3, col4 = st.columns(2)
                with col3:
                    create_bar_chart(df, score_columns[3])
                with col4:
                    create_bar_chart(df, score_columns[4])

                # display_highest_combined_scores(full_df)  # Call to display the calculated scores
            except Exception as e:
                st.error("An error occurred while processing the markdown table.")
                st.error(str(e))
        else:
            st.error("Failed to download the content from the URL provided.")
    # About tab
    with tab2:
        st.markdown('''
### Nous benchmark suite

Popularized by [Teknium](https://huggingface.co/teknium) and [NousResearch](https://huggingface.co/NousResearch), this benchmark suite aggregates four benchmarks (a small scoring sketch follows the list):

* [**AGIEval**](https://arxiv.org/abs/2304.06364) (0-shot): `agieval_aqua_rat,agieval_logiqa_en,agieval_lsat_ar,agieval_lsat_lr,agieval_lsat_rc,agieval_sat_en,agieval_sat_en_without_passage,agieval_sat_math`
* **GPT4All** (0-shot): `hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa`
* [**TruthfulQA**](https://arxiv.org/abs/2109.07958) (0-shot): `truthfulqa_mc`
* [**Bigbench**](https://arxiv.org/abs/2206.04615) (0-shot): `bigbench_causal_judgement,bigbench_date_understanding,bigbench_disambiguation_qa,bigbench_geometric_shapes,bigbench_logical_deduction_five_objects,bigbench_logical_deduction_seven_objects,bigbench_logical_deduction_three_objects,bigbench_movie_recommendation,bigbench_navigate,bigbench_reasoning_about_colored_objects,bigbench_ruin_names,bigbench_salient_translation_error_detection,bigbench_snarks,bigbench_sports_understanding,bigbench_temporal_sequences,bigbench_tracking_shuffled_objects_five_objects,bigbench_tracking_shuffled_objects_seven_objects,bigbench_tracking_shuffled_objects_three_objects`
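
If you want to recompute the `Average` column yourself, it can be treated as the simple mean of the four benchmark scores (an assumption about this table, not something defined by the suite). A minimal pandas sketch with purely illustrative numbers:

```python
import pandas as pd

# Illustrative scores for one model; 'Average' assumed to be the plain mean of the four benchmarks
scores = pd.DataFrame([{"AGIEval": 44.6, "GPT4All": 73.1, "TruthfulQA": 55.3, "Bigbench": 46.8}])
scores["Average"] = scores[["AGIEval", "GPT4All", "TruthfulQA", "Bigbench"]].mean(axis=1)
print(scores["Average"].round(2).iloc[0])  # 54.95
```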
### Reproducibility

You can easily reproduce these results using 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval/tree/master), a Colab notebook that automates the evaluation process (benchmark: `nous`). It uploads the results to GitHub as gists, and you can find the entire table with links to the detailed results [here](https://gist.github.com/mlabonne/90294929a2dbcb8877f9696f28105fdf).
### Clone this space

You can create your own leaderboard from your LLM AutoEval results on GitHub Gist. You just need to clone this space and set two things (a minimal fetch sketch follows the list):

* Change the `gist_id` in [yall.py](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard/blob/main/yall.py#L126).
* Create a new secret under Settings > Variables and secrets (name: `github`, value: [your GitHub token](https://github.com/settings/tokens)).
A special thanks to [gblazex](https://huggingface.co/gblazex) for providing many evaluations.
# Bonus: Workflow for Automating Model Evaluation and Selection

## Step 1: Export CSV Data from Another-LLM-LeaderBoards

Go to our [Another-LLM-LeaderBoards](https://leaderboards.example.com) and click the "Export to CSV" button. Save the file to `/tmp/models.csv`. A quick sanity check of the export is sketched below.
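
A minimal check that the export landed where you expect and contains the score columns used above (column names assumed from the leaderboard export):

```python
import pandas as pd

df = pd.read_csv('/tmp/models.csv')
print(df.columns.tolist())  # expect 'Model', 'Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench', ...
print(df[['Model', 'Average']].head())
```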
## Step 2: Examine the CSV Data

Run a script that extracts the model names, benchmark scores, and model cards from the CSV data:

```python
import pandas as pd
from huggingface_hub import ModelCard

# Load the CSV data
df = pd.read_csv('/tmp/models.csv')

# Sort the data by the 'Average' column (the overall benchmark score)
df_sorted = df.sort_values(by='Average', ascending=False)

# Open the file in append mode
with open('configurations.txt', 'a') as file:
    # Get model cards for the top 20 entries
    for index, row in df_sorted.head(20).iterrows():
        model_name = row['Model'].rstrip()
        try:
            card = ModelCard.load(model_name)
        except Exception as e:
            print(f"Failed to load model card for {model_name}. Error: {e}")
            continue
        file.write(f'Model Name: {model_name}\n')
        file.write(f'Scores: {row["Average"]}\n')
        file.write(f'AGIEval: {row["AGIEval"]}\n')
        file.write(f'GPT4All: {row["GPT4All"]}\n')
        file.write(f'TruthfulQA: {row["TruthfulQA"]}\n')
        file.write(f'Bigbench: {row["Bigbench"]}\n')
        file.write(f'Model Card: {card}\n')
```
## Step 3: Feed the Discovered Models, Scores, and Configurations to an LLM Client (shell-gpt)

Run your local LLM client, feeding it all of the discovered merged models, their benchmark scores, and, where found, the configurations used to merge them. Provide it with an instruction similar to this:

```bash
cat configurations.txt | sgpt --chat config "Based on the merged models that are provided here, along with their respective benchmark achievements and the configurations used in merging them, your task is to come up with a new configuration for a new merged model that will outperform all others. In your thought process, argue and reflect on your own choices to improve your thinking process and outcome"
```
## Step 4: (Optional) Reflect on the Initial Configuration Suggested by Chat-GPT

If you want to get particularly naughty, you can add a step like this, where you make Chat-GPT rethink and reflect on the configuration it initially came up with based on the information you gave it:

```bash
for i in $(seq 1 3); do echo "$i" && sgpt --chat config "Repeat the process from before and again reflect and improve on your suggested configuration"; sleep 20; done
```
## Step 5: Wait for Chat-GPT to Give You a Leaderboard-Topping Merge Configuration

Wait for Chat-GPT to provide a new merge configuration.

## Step 6: Enter the Configuration in the Automergekit Notebook

Fire up your automergekit notebook and enter the configuration that was just so generously provided to you by Chat-GPT. A minimal scripted alternative is sketched below.
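
If you prefer to script this step rather than use the notebook, here is a rough sketch, assuming the `mergekit-yaml` CLI that ships with mergekit is installed and that the suggested configuration has been saved to a file (`/tmp/suggested-config.yaml` is a hypothetical path):

```python
import subprocess
from pathlib import Path

# Hypothetical: the YAML merge configuration produced in Steps 3-5
config_path = Path("/tmp/suggested-config.yaml")

# Run the merge: mergekit-yaml <config> <output-dir>
subprocess.run(["mergekit-yaml", str(config_path), "./merged-model"], check=True)
```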
## Step 7: Evaluate the New Merge Using the auto-llm-eval Notebook

Fire up your auto-llm-eval notebook to see whether the merge that Chat-GPT came up with actually makes sense and performs well.

## Step 8: Repeat the Process

Repeat this process a few times every day, learning from each new model created.

## Step 9: Rank the New Number One Model

Rank the new number one model and top your own leaderboard (Model: CultriX/MergeCeption-7B-v3):

![image.png](https://cdn-uploads.huggingface.co/production/uploads/6495d5a915d8ef6f01bc75eb/mFV3Ou469fk6ivj1XrD9d.png)

## Step 10: Automate the Process with a Cronjob

Create a cronjob that automates this process five times every day, learning from the models it has created in order to build even better ones. I'm telling you: prepare yourself for some non-negligible increases in benchmark scores in the near future.
Cheers,
CultriX
''')
# Run the main function if this script is run directly
if __name__ == "__main__":
    main()