w601sxs committed on
Commit 992f74a · 1 Parent(s): 6d2dbdb
Files changed (2)
  1. .gitignore +3 -0
  2. app.py +11 -21
.gitignore ADDED
@@ -0,0 +1,3 @@
+.venv
+.venv/
+.venv/*
app.py CHANGED
@@ -83,29 +83,19 @@ def create_radar_chart(df, metric_columns):
 
 def main():
     st.set_page_config(page_title="SLM Leaderboard", layout="wide")
-    st.title("🏆 SLM Leaderboard")
-    st.markdown("We record Nous and standard benchmark results for various SLMs. Please submit a PR to this [repo](https://github.com/amazon-science/aws-research-science/tree/main/SLMleaderboard) to include your model! Heavily Inspired by [YALB](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard)")
+    st.title("🏆 SLM Leaderboard")
+    st.markdown("We record Nous and Standard benchmark results for various SLMs. Please submit a PR to this [repo](https://github.com/amazon-science/aws-research-science/tree/main/SLMleaderboard) to inlcude your model! Heavily Inspired by [YALB](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard) ")
 
-    # Default URL to your markdown file
-    default_url = "https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/nous.md"
+    # URL to your markdown file
+    md_url = st.text_input("This the default location of the bechmarks and can be changed",
+                           "https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/nous.md")
 
-    # Define benchmark URLs
-    benchmarks = {
-        "Nous benchmark results": "https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/nous.md",
-        "Standard LLM benchmarks": "https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/standard.md"
-    }
-
-    # User text input
-    md_url = st.text_input("This is the default location of the benchmarks and can be changed", default_url)
-
-    # Buttons to select benchmarks
-    for label, url in benchmarks.items():
-        if st.button(label):
-            md_url = url  # Update the URL based on button clicked
-            st.experimental_rerun()  # Refresh the dashboard
-
-    # Display the markdown file content
-    st.markdown(f"Current dataset URL: {md_url}")
+
+    st.markdown("""
+    Copy the following links into the textbox above and refresh dashboard:
+
+    - [Nous benchmark results](https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/nous.md)
+    - [Standard LLM benchmarks](https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/standard.md) """)
 
     if not md_url:
         st.error("Please provide a valid URL to a markdown file containing the leaderboard table.")