import streamlit as st
from streamlit_option_menu import option_menu
from word2vec import *
import pandas as pd
from autocomplete import *
from vector_graph import *
from plots import *
from lsj_dict import *
import json
from streamlit_tags import st_tags, st_tags_sidebar


st.set_page_config(page_title="ἄγαλμα | AGALMA", layout="centered", page_icon="images/AGALMA_logo.png")
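

# Cached data loaders: @st.cache_data memoizes the return values, so the word
# lists, the LSJ dictionary, and the lemma counts are read from disk only once.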
@st.cache_data
def load_lsj_dict():
    with open('lsj_dict.json', 'r') as f:
        return json.load(f)


@st.cache_data
def load_all_models_words():
    return sorted(load_compressed_word_list('corpora/compass_filtered.pkl.gz'), key=custom_sort)


@st.cache_data
def load_models_for_word_dict():
    return word_in_models_dict('corpora/compass_filtered.pkl.gz')


@st.cache_data
def load_all_lemmas():
    return load_compressed_word_list('all_lemmas.pkl.gz')


@st.cache_data
def load_lemma_count_dict():
    return count_lemmas('lemma_list_raw')


all_models_words = load_all_models_words()
lemma_dict = load_lsj_dict()
models_for_word_dict = load_models_for_word_dict()
lemma_counts = load_lemma_count_dict()
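

# Styling for the streamlit_option_menu widgets: a horizontal style for the app
# tabs and a vertical style for the sidebar navigation.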
styles_horizontal = {
    "container": {"display": "flex", "justify-content": "center"},
    "nav": {"display": "flex", "gap": "2px", "margin": "5px"},
    "nav-item": {"flex": "1", "font-family": "Sans-serif"},
    "nav-link": {
        "background-color": "#f0f0f0",
        "border": "1px solid #ccc",
        "border-radius": "5px",
        "padding": "10px",
        "width": "100px",
        "height": "60px",
        "display": "flex",
        "align-items": "center",
        "justify-content": "center",
        "transition": "background-color 0.3s, color 0.3s",
        "color": "black",
        "text-decoration": "none"
    },
    "nav-link:hover": {
        "background-color": "rgb(238, 238, 238)",
        "color": "#000"
    },
    "nav-link-selected": {
        "background-color": "#B8E52B",
        "color": "white",
        "font-weight": "bold"
    },
    "icon": {"display": "none"}
}

styles_vertical = {
    "nav-link-selected": {
        "background-color": "#B8E52B",
        "color": "white",
        "font-weight": "bold"
    }
}
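

# Sidebar with logo, title, and top-level navigation (App / About / FAQ / License).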
with st.sidebar:
    st.image('images/AGALMA_logo.png', width=250)
    st.markdown('# ἄγαλμα | AGALMA')
    selected = option_menu(None, ["App", "About", "FAQ", "License"],
                           menu_icon="menu", default_index=0, orientation="vertical", styles=styles_vertical)


if selected == "App":

    active_tab = option_menu(None, ["Nearest neighbours", "Cosine similarity", "3D graph", "Dictionary"],
                             menu_icon="cast", default_index=0, orientation="horizontal", styles=styles_horizontal)

    st.markdown("""
<style>
/* Define a class to remove list-style-type */
.no-list-style {
    list-style-type: none;
}
</style>
""", unsafe_allow_html=True)
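
    # Tab 1: nearest neighbours of a target lemma in one or more time slices,
    # ranked by cosine similarity.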
    if active_tab == "Nearest neighbours":

        eligible_models = ["Archaic", "Classical", "Hellenistic", "Early Roman", "Late Roman"]
        all_models_words = load_all_models_words()

        with st.container():
            st.markdown("## Nearest neighbours")
            st.markdown('Here you can extract the nearest neighbours of a chosen lemma. Please select one or more time slices and the preferred number of nearest neighbours.')
            target_word = st.multiselect("Enter a word", options=all_models_words, max_selections=1)
            if len(target_word) > 0:
                target_word = target_word[0]

                eligible_models = models_for_word_dict[target_word]

            models = st.multiselect(
                "Select models to search for neighbours",
                eligible_models
            )
            n = st.slider("Number of neighbours", 1, 50, 15)

            nearest_neighbours_button = st.button("Find nearest neighbours")

            if nearest_neighbours_button:
                if validate_nearest_neighbours(target_word, n, models) == False:
                    st.error('Please fill in all fields')
                else:
                    models = load_selected_models(models)

                    nearest_neighbours = get_nearest_neighbours(target_word, n, models)

                    all_dfs = []

                    # One table per selected time slice, with the occurrence count of each neighbour.
                    for model in nearest_neighbours.keys():
                        st.write(f"### {model}")
                        df = pd.DataFrame(
                            nearest_neighbours[model],
                            columns=['Word', 'Cosine Similarity']
                        )

                        df['Occurrences'] = df['Word'].apply(lambda x: lemma_counts[model][x])

                        all_dfs.append((model, df))
                        st.table(df)

                    # Offer all result tables as a single spreadsheet download.
                    tmp_file = store_df_in_temp_file(all_dfs)

                    with open(tmp_file, "rb") as file:
                        file_byte = file.read()

                    st.download_button(
                        "Download results",
                        data=file_byte,
                        file_name=f'nearest_neighbours_{target_word}.xlsx',
                        mime='application/octet-stream'
                    )
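
    # Tab 2: cosine similarity between two lemma vectors, each taken from its own
    # time slice. (Cosine similarity of vectors u and v is dot(u, v) / (|u| * |v|).)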
    elif active_tab == "Cosine similarity":
        all_models_words = load_all_models_words()

        with st.container():
            eligible_models_1 = []
            eligible_models_2 = []
            st.markdown("## Cosine similarity")
            st.markdown('Here you can extract the cosine similarity between two lemmas. Please select a time slice for each lemma. You can also calculate the cosine similarity between two vectors of the same lemma in different time slices.')
            col1, col2 = st.columns(2)
            col3, col4 = st.columns(2)
            with col1:
                word_1 = st.multiselect("Enter a word", placeholder="πατήρ", max_selections=1, options=all_models_words)
                if len(word_1) > 0:
                    word_1 = word_1[0]
                    eligible_models_1 = models_for_word_dict[word_1]

            with col2:
                time_slice_1 = st.selectbox("Time slice word 1", options=eligible_models_1)

        with st.container():
            with col3:
                word_2 = st.multiselect("Enter a word", placeholder="μήτηρ", max_selections=1, options=all_models_words)
                if len(word_2) > 0:
                    word_2 = word_2[0]
                    eligible_models_2 = models_for_word_dict[word_2]

            with col4:
                time_slice_2 = st.selectbox("Time slice word 2", eligible_models_2)

        cosine_similarity_button = st.button("Calculate cosine similarity")

        if cosine_similarity_button:
            cosine_similarity_score = get_cosine_similarity(word_1, time_slice_1, word_2, time_slice_2)
            st.write(cosine_similarity_score)
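
    # Tab 3: interactive 3D plot (t-SNE projection) of the semantic space around
    # a target lemma.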
    elif active_tab == "3D graph":
        st.markdown("## 3D graph")
        st.markdown('Here you can generate a 3D representation of the semantic space surrounding a target lemma. Please choose the lemma and the time slice.')

        col1, col2 = st.columns(2)

        all_models_words = load_all_models_words()

        with st.container():
            eligible_models = []
            with col1:
                word = st.multiselect("Enter a word", all_models_words, max_selections=1)
                if len(word) > 0:
                    word = word[0]
                    eligible_models = models_for_word_dict[word]

            with col2:
                time_slice = st.selectbox("Time slice", eligible_models)

            n = st.slider("Number of words", 1, 50, 15)

            graph_button = st.button("Create 3D graph")

            if graph_button:
                time_slice_model = convert_time_name_to_model(time_slice)
                nearest_neighbours_vectors = get_nearest_neighbours_vectors(word, time_slice_model, n)

                fig, df = make_3d_plot_tSNE(nearest_neighbours_vectors, word, time_slice_model)

                st.plotly_chart(fig)
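
    # Tab 4: look up a lemma in the LSJ (Liddell-Scott-Jones) dictionary.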
    elif active_tab == "Dictionary":

        with st.container():
            st.markdown('## Dictionary')
            st.markdown('Search a word in the Liddell-Scott-Jones dictionary (only Greek, no whitespace).')

            all_lemmas = load_all_lemmas()

            query_tag = st_tags(label='',
                                text='',
                                value=[],
                                suggestions=all_lemmas,
                                maxtags=1,
                                key='1')

            if query_tag:
                st.write(f"### {query_tag[0]}")

                # Look the query up as entered; fall back to the capitalized form.
                if query_tag[0] in lemma_dict:
                    data = lemma_dict[query_tag[0]]
                elif query_tag[0].capitalize() in lemma_dict:
                    data = lemma_dict[query_tag[0].capitalize()]
                else:
                    data = None
                    st.error("Word not found in dictionary")

                # Only render an entry when the lookup succeeded.
                if data is not None:
                    st.markdown(format_text(data), unsafe_allow_html=True)

        st.markdown("""
<style>
.tab {
    display: inline-block;
    margin-left: 4em;
}
.tr {
    font-weight: bold;
}
.list-class {
    list-style-type: none;
    margin-top: 1em;
}
.primary-indicator {
    font-weight: bold;
    font-size: x-large;
}
.secondary-indicator {
    font-weight: bold;
    font-size: large;
}
.tertiary-indicator {
    font-weight: bold;
    font-size: medium;
}
.quaternary-indicator {
    font-weight: bold;
    font-size: medium;
}
.primary-class {
    padding-left: 2em;
}
.secondary-class {
    padding-left: 4em;
}
.tertiary-class {
    padding-left: 6em;
}
.quaternary-class {
    padding-left: 8em;
}
</style>
""", unsafe_allow_html=True)


if selected == "About":
    st.markdown("""
## About
Welcome to AGALMA | ἄγαλμα, the Ancient Greek Accessible Language Models for linguistic Analysis!

This interface was developed in the framework of Silvia Stopponi’s PhD project, \
supervised by Saskia Peels-Matthey and Malvina Nissim at the University of Groningen (The Netherlands). \
The aim of this tool is to make language models trained on Ancient Greek available to all interested people, regardless of their coding skills.

The following people were involved in the creation of this interface:

**Mark den Ouden** developed the interface.

**Silvia Stopponi** trained the models, defined the structure of the interface, and wrote the textual content.

**Saskia Peels-Matthey** supervised the project and revised the structure of the interface and the textual content.

**Malvina Nissim** supervised the project.

**Anchoring Innovation** financially supported the creation of this interface. \
Anchoring Innovation is the Gravitation Grant research agenda of the Dutch National Research School in Classical Studies, OIKOS. \
It is financially supported by the Dutch Ministry of Education, Culture and Science (NWO project number 024.003.012).

<div style="text-align: center; font-weight: bold;">How to cite</div>

If you use this interface for your research, please cite it as:

Stopponi, Silvia, Mark den Ouden, Saskia Peels-Matthey & Malvina Nissim. 2024. \
<span style="font-style: italic;">AGALMA: Ancient Greek Accessible Language Models for linguistic Analysis.</span>
""", unsafe_allow_html=True)


if selected == "FAQ":
    st.markdown("""
## FAQ
""")

    local_css("style.css")
    with st.expander(r"$\textsf{\Large Which models is this interface based on?}$"):
        st.write(
            "This interface is based on five language models. \
            Language models are statistical models of language, \
            which store statistical information about word co-occurrence during the training phase. \
            During training they process a corpus of texts in the target language(s). \
            Once trained, models can be used to extract information about the language \
            (in this interface, we focus on the extraction of semantic information) or to perform specific linguistic tasks. \
            The models on which this interface is based are Word Embedding models."
        )

    with st.expander(r"$\textsf{\Large Which corpus was used to train the models?}$"):
        st.write(
            "The five models on which this interface is based were trained on five slices of the Diorisis Ancient Greek Corpus (Vatri & McGillivray 2018)."
        )


if selected == "License":
    st.markdown("""
## License
The cosine similarity, nearest neighbours, and 3D representation data are licensed under a CC BY license.

The LSJ dictionary has a CC BY-SA license and comes from the Unicode version of the dictionary produced by \
[Giuseppe G. A. Celano](%s). The original (Betacode) version is provided under a CC BY-SA license by the [Perseus Digital Library](https://www.perseus.tufts.edu/). \
Data available at https://github.com/PerseusDL/lexica/.
""" % 'https://github.com/gcelano/LSJ_GreekUnicode?tab=readme-ov-file')
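

# Global font override applied to the whole app.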
streamlit_style = """
<style>
html, body {
    font-family: 'Helvetica';
}
</style>
"""

st.markdown(streamlit_style, unsafe_allow_html=True)