app: rework of leaderboard (now show tabs for each dataset, improve description)
Changed file: `app.py`
@@ -2,18 +2,42 @@ import gradio as gr
 import pandas as pd
 
 title = """
-# hmLeaderboard
+# hmLeaderboard: Space for tracking and ranking models on Historic NER Datasets
 
 
 """
 
 description = """
-##
+## Models
 
 At the moment the following models are supported:
 
 * hmBERT: [Historical Multilingual Language Models for Named Entity Recognition](https://huggingface.co/hmbert).
 * hmTEAMS: [Historic Multilingual TEAMS Models](https://huggingface.co/hmteams).
+
+## Datasets
+
+We test our pretrained language models on various datasets from HIPE-2020, HIPE-2022 and Europeana. The following table
+shows an overview of the datasets used.
+
+| Language | Datasets                                                         |
+|----------|------------------------------------------------------------------|
+| English  | [AjMC] - [TopRes19th]                                            |
+| German   | [AjMC] - [NewsEye] - [HIPE-2020]                                  |
+| French   | [AjMC] - [ICDAR-Europeana] - [LeTemps] - [NewsEye] - [HIPE-2020]  |
+| Finnish  | [NewsEye]                                                         |
+| Swedish  | [NewsEye]                                                         |
+| Dutch    | [ICDAR-Europeana]                                                 |
+
+[AjMC]: https://github.com/hipe-eval/HIPE-2022-data/blob/main/documentation/README-ajmc.md
+[NewsEye]: https://github.com/hipe-eval/HIPE-2022-data/blob/main/documentation/README-newseye.md
+[TopRes19th]: https://github.com/hipe-eval/HIPE-2022-data/blob/main/documentation/README-topres19th.md
+[ICDAR-Europeana]: https://github.com/stefan-it/historic-domain-adaptation-icdar
+[LeTemps]: https://github.com/hipe-eval/HIPE-2022-data/blob/main/documentation/README-letemps.md
+[HIPE-2020]: https://github.com/hipe-eval/HIPE-2022-data/blob/main/documentation/README-hipe2020.md
+
+## Results
 """
 footer = "Made from Bavarian Oberland with ❤️ and 🥨."
 
@@ -63,23 +87,51 @@ def perform_evaluation_for_languages(model_selection, selected_languages):
 
     return result_df
 
+dataset_to_description_mapping = {
+    "AjMC": "#### AjMC\nThe AjMC dataset consists of NE-annotated historical commentaries in the field of Classics, and was created in the context of the [Ajax MultiCommentary](https://mromanello.github.io/ajax-multi-commentary/) project.\n\nThe following NEs were annotated: `pers`, `work`, `loc`, `object`, `date` and `scope`.",
+    "NewsEye": "#### NewsEye\nThe NewsEye dataset is comprised of diachronic historical newspaper material published between 1850 and 1950 in French, German, Finnish, and Swedish. More information can be found [here](https://dl.acm.org/doi/abs/10.1145/3404835.3463255).\n\nThe following NEs were annotated: `PER`, `LOC`, `ORG` and `HumanProd`.",
+    "ICDAR": "#### ICDAR\nThe ICDAR-Europeana NER Dataset is a preprocessed variant of the [Europeana NER Corpora](https://github.com/EuropeanaNewspapers/ner-corpora) for Dutch and French.\n\nThe following NEs were annotated: `PER`, `LOC` and `ORG`.",
+    "LeTemps": "#### LeTemps\nThe LeTemps dataset consists of NE-annotated historical French newspaper articles from mid-19C to mid-20C.\n\nThe following NEs were annotated: `loc`, `org` and `pers`.",
+    "TopRes19th": "#### TopRes19th\nThe TopRes19th dataset consists of NE-annotated historical English newspaper articles from 19C.\n\nThe following NEs were annotated: `BUILDING`, `LOC` and `STREET`.",
+    "HIPE-2020": "#### HIPE-2020\nThe HIPE-2020 dataset is comprised of newspapers from mid-19C to mid-20C. More information can be found [here](https://dl.acm.org/doi/abs/10.1007/978-3-030-58219-7_21).\n\nThe following NEs were annotated: `loc`, `org`, `pers`, `prod`, `time` and `comp`.",
+}
+
+configuration_to_description_mapping = {
+    "Best Configuration": "The best hyper-parameter configuration for each model is used and the average F1-score over runs with different seeds is reported here:",
+    "Best Model": "The best hyper-parameter configuration for each model is used, the model with the highest F1-score is chosen and its performance is reported here:"
+}
+
 with gr.Blocks() as demo:
     gr.Markdown(title)
     gr.Markdown(description)
 
     with gr.Tab("Overview"):
-        gr.Markdown("### Best Configuration
+        gr.Markdown("### Best Configuration")
+        gr.Markdown(configuration_to_description_mapping["Best Configuration"])
 
         df_result = perform_evaluation_for_datasets("Best Configuration", dataset_names)
 
         gr.Dataframe(value=df_result)
 
-        gr.Markdown("### Best Model
+        gr.Markdown("### Best Model")
+        gr.Markdown(configuration_to_description_mapping["Best Model"])
 
         df_result = perform_evaluation_for_datasets("Best Model", dataset_names)
 
         gr.Dataframe(value=df_result)
 
+    for dataset_name, dataset_description in dataset_to_description_mapping.items():
+        with gr.Tab(dataset_name):
+            selected_datasets = [ds for ds in dataset_names if dataset_name.lower() in ds.lower()]
+
+            gr.Markdown(dataset_description)
+
+            for config in ["Best Configuration", "Best Model"]:
+                gr.Markdown(f"##### Results for {config}")
+                gr.Markdown(configuration_to_description_mapping[config])
+                df_result = perform_evaluation_for_datasets(config, selected_datasets)
+                gr.Dataframe(value=df_result)
+
     with gr.Tab("Filtering"):
 
         gr.Markdown("### Filtering\nSwiss-knife filtering for single datasets and languages is possible.")
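
The core of the rework is the dict-driven tab loop: every entry in `dataset_to_description_mapping` gets its own `gr.Tab` containing the dataset description plus a result table for each reporting mode. The sketch below isolates that pattern in a minimal, runnable form; the `score_table` helper, the toy descriptions and the dummy F1-scores are stand-ins for the Space's real `perform_evaluation_for_datasets` and `dataset_names`, which are not part of this diff.

```python
import gradio as gr
import pandas as pd

# Toy stand-ins for the Space's real dataset list and descriptions.
dataset_descriptions = {
    "AjMC": "Historical commentaries in the field of Classics.",
    "NewsEye": "Historical newspapers in French, German, Finnish and Swedish.",
}

def score_table(config: str, dataset: str) -> pd.DataFrame:
    # Placeholder for the Space's perform_evaluation_for_datasets(); returns dummy scores.
    return pd.DataFrame({"Model": ["hmBERT", "hmTEAMS"], "F1-Score": [0.85, 0.87]})

with gr.Blocks() as demo:
    # One tab per dataset, generated from the mapping instead of hard-coded tabs.
    for dataset_name, dataset_description in dataset_descriptions.items():
        with gr.Tab(dataset_name):
            gr.Markdown(dataset_description)
            for config in ["Best Configuration", "Best Model"]:
                gr.Markdown(f"##### Results for {config}")
                gr.Dataframe(value=score_table(config, dataset_name))

if __name__ == "__main__":
    demo.launch()
```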
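For reference, the two reporting modes described in `configuration_to_description_mapping` differ only in how per-seed runs are aggregated: averaging the F1-scores versus taking the single best run. A rough pandas sketch, assuming a toy frame that already holds the scores of each model's best hyper-parameter configuration across seeds (the leaderboard's actual aggregation code is not shown in this diff):

```python
import pandas as pd

# Hypothetical per-seed F1-scores for each model's best hyper-parameter configuration.
runs = pd.DataFrame({
    "model": ["hmBERT", "hmBERT", "hmBERT", "hmTEAMS", "hmTEAMS", "hmTEAMS"],
    "seed":  [1, 2, 3, 1, 2, 3],
    "f1":    [0.838, 0.845, 0.851, 0.856, 0.861, 0.849],
})

# "Best Configuration": average F1-score over the runs with different seeds.
best_configuration = runs.groupby("model")["f1"].mean().round(4)

# "Best Model": the single run with the highest F1-score.
best_model = runs.groupby("model")["f1"].max().round(4)

print(best_configuration)
print(best_model)
```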