Spaces:
Sleeping
Sleeping
import pandas as pd | |
import gradio as gr | |
data = { | |
"Method": [ | |
"OR-Tools", "ACO", "LKH3", "NN2Opt", "Tabu", | |
], | |
"TWCVRP": [ | |
57.90, 48.82, 51.35, 46.67, 45.58, | |
], | |
"TWCVRP": [ | |
59.11, 42.89, 49.06, 36.59, 33.59, | |
] | |
} | |
df = pd.DataFrame(data) | |
df['Average Score'] = df.iloc[:, 1:].mean(axis=1).round(2) | |
df = df[['Method', 'Average Score'] + [col for col in df.columns if col not in ['Method', 'Average Score']]] | |
def display_data():
    """Gradio callback: expose the prepared leaderboard table.

    Returns:
        The module-level ``df`` DataFrame (built at import time).
    """
    leaderboard = df
    return leaderboard
# Build the two-tab Gradio UI: a leaderboard table and submission instructions.
with gr.Blocks() as demo:
    gr.Markdown("# **SVRP-Bench: Real-World Vehicle Routing Benchmark**")
    gr.Markdown("""
    This table shows the performance of different models across various tasks/datasets including CVRP and TWVRP.
    """)
    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        # NOTE(review): the original tab labels contained mojibake ("๐ ...")
        # where emoji were mis-encoded during extraction; restored with
        # plausible leaderboard/handshake emoji — confirm against the live Space.
        with gr.TabItem("🏆 LLM Leaderboard", elem_id="llm-benchmark-tab-table", id=0):
            gr.Dataframe(value=df, label="SVRP-Bench Model Performance", interactive=False)
        with gr.TabItem("🤝 How to Submit", elem_id="submission-tab", id=1):
            gr.Markdown("""
            ## Submission Instructions
            To contribute your model's results to the SVRP-Bench leaderboard:
            - **Via Email**:
                - Send your results to **[email protected]**, and we'll add them to the leaderboard for you.
            **We look forward to seeing your contributions!**
            """)

demo.launch()