import re
from functools import cache
from io import StringIO
from itertools import combinations

import pandas as pd
import plotly.graph_objects as go
import streamlit as st
from huggingface_hub import HfApi
from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError

# create_yall() builds the leaderboard markdown table from the GitHub gist (defined in yall.py)
from yall import create_yall


@cache
def cached_model_info(api, model):
    """Fetch model info from the Hugging Face Hub, caching the result per model."""
    try:
        return api.model_info(repo_id=str(model))
    except (RepositoryNotFoundError, RevisionNotFoundError):
        return None


def convert_markdown_table_to_dataframe(md_content):
    """Convert a markdown table to a DataFrame, splitting [name](url) links into Model and URL columns."""
    # Strip the leading and trailing pipe characters on each line
    cleaned_content = re.sub(r'\|\s*$', '', re.sub(r'^\|\s*', '', md_content, flags=re.MULTILINE), flags=re.MULTILINE)
    df = pd.read_csv(StringIO(cleaned_content), sep=r'\|', engine='python')

    # Drop the markdown separator row (|---|---|...) that follows the header
    df = df.drop(0, axis=0)
    df.columns = [col.strip() for col in df.columns]

    # Initialize empty columns for URL and Model if not already present
    if 'Model' not in df.columns:
        df['Model'] = None
    if 'URL' not in df.columns:
        df['URL'] = None

    # Extract the display name and link target from markdown links of the form [name](url)
    model_link_pattern = r'\[(.*?)\]\((.*?)\)'
    for index, row in df.iterrows():
        match = re.search(model_link_pattern, row['Model'])
        if match:
            df.at[index, 'Model'] = match.group(1)
            df.at[index, 'URL'] = match.group(2)

    return df


def get_and_update_model_info(df):
    """Add 'Likes' and 'Tags' columns by querying the Hugging Face Hub for each model."""
    api = HfApi()
    df['Likes'] = -1  # Default value for Likes
    df['Tags'] = ''   # Default value for Tags
    for index, row in df.iterrows():
        if pd.notnull(row['Model']):
            model_info = cached_model_info(api, row['Model'].strip())
            if model_info:
                df.at[index, 'Likes'] = model_info.likes
                df.at[index, 'Tags'] = ', '.join(model_info.tags)
    return df


def calculate_highest_combined_score(data, column):
    """Return the top-3 model combinations (sizes 2 to 6) with the highest summed score for a column."""
    # Ensure the column exists and has numeric data
    if column not in data.columns or not pd.api.types.is_numeric_dtype(data[column]):
        return column, {}

    # Keep model/score pairs aligned by dropping rows with a missing score
    valid = data[['Model', column]].dropna(subset=[column])
    scores = valid[column].tolist()
    models = valid['Model'].tolist()

    # Brute-force search over all combinations; fine for a small leaderboard
    top_combinations = {r: [] for r in range(2, 7)}
    for r in range(2, 7):
        for combination in combinations(zip(scores, models), r):
            combined_score = sum(score for score, _ in combination)
            top_combinations[r].append((combined_score, tuple(model for _, model in combination)))
        top_combinations[r].sort(key=lambda x: x[0], reverse=True)
        top_combinations[r] = top_combinations[r][:3]
    return column, top_combinations


def display_highest_combined_scores(data, score_columns):
    """Display the best-scoring model combinations for each score column."""
    for column in score_columns:
        _, top_combinations = calculate_highest_combined_score(data, column)
        if top_combinations:
            st.subheader(f"Top Combinations for {column}")
            for r, combos in top_combinations.items():
                st.write(f"**Number of Models: {r}**")
                for score, combo in combos:
                    st.write(f"Score: {score}, Models: {', '.join(combo)}")


# Function to create a bar chart for a given category
def create_bar_chart(df, category):
    """Create and display a horizontal bar chart for a given score category."""
    st.write(f"### {category} Scores")

    # Sort the DataFrame based on the category score
    sorted_df = df[['Model', category]].sort_values(by=category, ascending=True)

    # Create the bar chart with a color gradient ('Spectral' color scale; swap in any Plotly scale)
    fig = go.Figure(go.Bar(
        x=sorted_df[category],
        y=sorted_df['Model'],
        orientation='h',
        marker=dict(color=sorted_df[category], colorscale='Spectral')
    ))

    # Update layout for better readability
    fig.update_layout(
        margin=dict(l=20, r=20, t=20, b=20)
    )

    # Adjust the height of the chart based on the number of rows in the DataFrame
    st.plotly_chart(fig, use_container_width=True, height=len(df) * 35)
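
# ---------------------------------------------------------------------------
# Illustrative only: a minimal sketch of the markdown table shape the helpers
# above expect (a header row, a separator row, and one row per model whose
# 'Model' cell is a [name](url) link followed by the score columns). The
# function name and the table rows below are hypothetical demo data, not real
# leaderboard content, and the app never calls this function.
# ---------------------------------------------------------------------------
def _demo_parse_table():
    example_md = (
        "| Model | Average | AGIEval | GPT4All | TruthfulQA | Bigbench |\n"
        "|---|---|---|---|---|---|\n"
        "| [model-a](https://huggingface.co/org/model-a) | 50.1 | 40.2 | 60.3 | 45.4 | 38.5 |\n"
        "| [model-b](https://huggingface.co/org/model-b) | 48.7 | 39.1 | 58.9 | 44.0 | 37.2 |\n"
    )
    demo_df = convert_markdown_table_to_dataframe(example_md)
    # 'Model' now holds the display name and 'URL' the link target
    print(demo_df[['Model', 'URL', 'Average']])
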
layout="wide") st.title("🏆 YALL - Yet Another LLM Leaderboard") st.markdown("Leaderboard made with 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using [Nous](https://huggingface.co/NousResearch) benchmark suite.") # Placeholder or logic to set 'content' with actual markdown or data content = create_yall() # Ensure 'content' has a value before proceeding if content: df = convert_markdown_table_to_dataframe(content) df = get_and_update_model_info(df) score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench'] for col in score_columns: if col in df.columns: df[col] = pd.to_numeric(df[col], errors='coerce') display_highest_combined_scores(df, score_columns) tab1, tab2 = st.tabs(["🏆 Leaderboard", "📝 About"]) # Leaderboard tab with tab1: if content: try: score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench'] # Display dataframe full_df = convert_markdown_table_to_dataframe(content) for col in score_columns: # Corrected use of pd.to_numeric full_df[col] = pd.to_numeric(full_df[col].str.strip(), errors='coerce') full_df = get_model_info(full_df) full_df['Tags'] = full_df['Tags'].fillna('') df = pd.DataFrame(columns=full_df.columns) # Toggles for filtering by tags show_phi = st.checkbox("Phi (2.8B)", value=True) show_mistral = st.checkbox("Mistral (7B)", value=True) show_other = st.checkbox("Other", value=True) # Create a DataFrame based on selected filters dfs_to_concat = [] if show_phi: dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('phi,|phi-msft,')]) if show_mistral: dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('mistral,')]) if show_other: other_df = full_df[~full_df['Tags'].str.lower().str.contains('phi,|phi-msft,|mistral,')] dfs_to_concat.append(other_df) # Concatenate the DataFrames if dfs_to_concat: df = pd.concat(dfs_to_concat, ignore_index=True) # Add a search bar search_query = st.text_input("Search models", "") # Filter the DataFrame based on the search query if search_query: df = df[df['Model'].str.contains(search_query, case=False)] # Display the filtered DataFrame or the entire leaderboard st.dataframe( df[['Model'] + score_columns + ['Likes', 'URL']], use_container_width=True, column_config={ "Likes": st.column_config.NumberColumn( "Likes", help="Number of likes on Hugging Face", format="%d ❤️", ), "URL": st.column_config.LinkColumn("URL"), }, hide_index=True, height=len(df) * 37, ) selected_models = st.multiselect('Select models to compare', df['Model'].unique()) comparison_df = df[df['Model'].isin(selected_models)] st.dataframe(comparison_df) # Add a button to export data to CSV if st.button("Export to CSV"): # Export the DataFrame to CSV csv_data = df.to_csv(index=False) # Create a link to download the CSV file st.download_button( label="Download CSV", data=csv_data, file_name="leaderboard.csv", key="download-csv", help="Click to download the CSV file", ) # Full-width plot for the first category create_bar_chart(df, score_columns[0]) # Next two plots in two columns col1, col2 = st.columns(2) with col1: create_bar_chart(df, score_columns[1]) with col2: create_bar_chart(df, score_columns[2]) # Last two plots in two columns col3, col4 = st.columns(2) with col3: create_bar_chart(df, score_columns[3]) with col4: create_bar_chart(df, score_columns[4]) except Exception as e: st.error("An error occurred while processing the markdown table.") st.error(str(e)) else: st.error("Failed to download the content from the URL provided.") # About tab with tab2: st.markdown(''' ### Nous benchmark suite 
Popularized by [Teknium](https://huggingface.co/teknium) and [NousResearch](https://huggingface.co/NousResearch), this benchmark suite aggregates four benchmarks:

* [**AGIEval**](https://arxiv.org/abs/2304.06364) (0-shot): `agieval_aqua_rat,agieval_logiqa_en,agieval_lsat_ar,agieval_lsat_lr,agieval_lsat_rc,agieval_sat_en,agieval_sat_en_without_passage,agieval_sat_math`
* **GPT4ALL** (0-shot): `hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa`
* [**TruthfulQA**](https://arxiv.org/abs/2109.07958) (0-shot): `truthfulqa_mc`
* [**Bigbench**](https://arxiv.org/abs/2206.04615) (0-shot): `bigbench_causal_judgement,bigbench_date_understanding,bigbench_disambiguation_qa,bigbench_geometric_shapes,bigbench_logical_deduction_five_objects,bigbench_logical_deduction_seven_objects,bigbench_logical_deduction_three_objects,bigbench_movie_recommendation,bigbench_navigate,bigbench_reasoning_about_colored_objects,bigbench_ruin_names,bigbench_salient_translation_error_detection,bigbench_snarks,bigbench_sports_understanding,bigbench_temporal_sequences,bigbench_tracking_shuffled_objects_five_objects,bigbench_tracking_shuffled_objects_seven_objects,bigbench_tracking_shuffled_objects_three_objects`

### Reproducibility

You can easily reproduce these results using 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval/tree/master), a Colab notebook that automates the evaluation process (benchmark: `nous`). This will upload the results to GitHub as gists. You can find the entire table with the links to the detailed results [here](https://gist.github.com/mlabonne/90294929a2dbcb8877f9696f28105fdf).

### Clone this space

You can create your own leaderboard with your LLM AutoEval results on GitHub Gist. You just need to clone this space and specify two variables:

* Change the `gist_id` in [yall.py](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard/blob/main/yall.py#L126).
* Create a "New Secret" in Settings > Variables and secrets (name: "github", value: [your GitHub token](https://github.com/settings/tokens)).

A special thanks to [gblazex](https://huggingface.co/gblazex) for providing many evaluations.
''')


# Run the main function if this script is run directly
if __name__ == "__main__":
    main()
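

# ---------------------------------------------------------------------------
# Illustrative only: the "Clone this space" notes above mention a `gist_id` in
# yall.py and a Space secret named "github". The sketch below shows one way
# such a setup *could* fetch the leaderboard markdown from a GitHub gist; it is
# an assumption for illustration, not the actual implementation of
# create_yall(). The function name is hypothetical, `requests` is assumed to be
# installed, and the secret is assumed to be exposed as an environment variable.
# ---------------------------------------------------------------------------
def _fetch_gist_markdown_sketch(gist_id: str) -> str:
    """Return the concatenated content of all files in a GitHub gist (sketch)."""
    # Local imports keep this illustrative sketch self-contained
    import os
    import requests

    token = os.environ.get("github", "")  # Space secret named "github"
    headers = {"Authorization": f"token {token}"} if token else {}
    response = requests.get(f"https://api.github.com/gists/{gist_id}", headers=headers)
    response.raise_for_status()
    files = response.json()["files"]
    return "\n".join(f["content"] for f in files.values())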