w601sxs committed
Commit 6d2dbdb · 1 Parent(s): 61763a4

added dropdown

Files changed (1):
  1. app.py +22 -14
app.py CHANGED
@@ -82,22 +82,30 @@ def create_radar_chart(df, metric_columns):
 
 
 def main():
-    st.set_page_config(page_title="LLM Leaderboard", layout="wide")
-    st.title("🏆 SLM Leaderboard")
-    st.markdown("We record Nous benchmark results for various SLMs. Please submit a PR to this [repo](https://github.com/amazon-science/aws-research-science/tree/main/SLMleaderboard) to inlcude your model! Heavily Inspired by [YALB](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard) ")
+    st.set_page_config(page_title="SLM Leaderboard", layout="wide")
+    st.title("🏆 SLM Leaderboard")
+    st.markdown("We record Nous and standard benchmark results for various SLMs. Please submit a PR to this [repo](https://github.com/amazon-science/aws-research-science/tree/main/SLMleaderboard) to include your model! Heavily Inspired by [YALB](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard)")
 
-    # URL to your markdown file
-    md_url = st.text_input("This the default location of the bechmarks and can be changed",
-                           "https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/nous.md")
+    # Default URL to your markdown file
+    default_url = "https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/nous.md"
 
-
-    st.markdown("""
-    Copy the following links into the textbox above and refresh dashboard:
-
-    - [Nous benchmark results](https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/nous.md)
-    - [Standard LLM benchmarks](https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/standard.md) """)
-
-
+    # Define benchmark URLs
+    benchmarks = {
+        "Nous benchmark results": "https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/nous.md",
+        "Standard LLM benchmarks": "https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/standard.md"
+    }
+
+    # User text input
+    md_url = st.text_input("This is the default location of the benchmarks and can be changed", default_url)
+
+    # Buttons to select benchmarks
+    for label, url in benchmarks.items():
+        if st.button(label):
+            md_url = url  # Update the URL based on button clicked
+            st.experimental_rerun()  # Refresh the dashboard
+
+    # Display the markdown file content
+    st.markdown(f"Current dataset URL: {md_url}")
 
     if not md_url:
         st.error("Please provide a valid URL to a markdown file containing the leaderboard table.")
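The commit message mentions a dropdown, while the diff wires the benchmark choice to st.button plus st.experimental_rerun(). Below is a minimal sketch of exposing the same selection as an actual dropdown via st.selectbox; the benchmarks dict and URLs are taken from the diff above, while the widget labels and surrounding scaffolding are illustrative assumptions rather than part of the commit.

import streamlit as st

def main():
    st.set_page_config(page_title="SLM Leaderboard", layout="wide")
    st.title("🏆 SLM Leaderboard")

    # Benchmark URLs, as defined in the diff above
    benchmarks = {
        "Nous benchmark results": "https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/nous.md",
        "Standard LLM benchmarks": "https://raw.githubusercontent.com/amazon-science/aws-research-science/refs/heads/main/SLMleaderboard/standard.md",
    }

    # A selectbox renders as a dropdown; Streamlit reruns the script when the
    # selection changes, so no explicit rerun call is needed here.
    choice = st.selectbox("Benchmark", list(benchmarks))
    md_url = st.text_input("Markdown file URL (editable)", benchmarks[choice])

    st.markdown(f"Current dataset URL: {md_url}")

if __name__ == "__main__":
    main()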