import streamlit as st
from streamlit_option_menu import option_menu
from word2vec import *
import pandas as pd
from autocomplete import *
from vector_graph import *
from plots import *
from lsj_dict import *
import json
from streamlit_tags import st_tags, st_tags_sidebar

st.set_page_config(page_title="Ancient Greek Word2Vec", layout="centered")
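
# Cached loaders: each data file is read from disk once and reused across reruns.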
@st.cache_data
def load_lsj_dict():
    # Preprocessed LSJ dictionary: maps a lemma to its entry data.
    with open('lsj_dict.json', 'r') as f:
        return json.load(f)


@st.cache_data
def load_all_models_words():
    # Sorted word list from the compass-filtered corpus, used to populate the select boxes.
    return sorted(load_compressed_word_list('corpora/compass_filtered.pkl.gz'), key=custom_sort)


@st.cache_data
def load_models_for_word_dict():
    # Mapping from each word to the time-slice models in which it occurs.
    return word_in_models_dict('corpora/compass_filtered.pkl.gz')
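
# Shared data loaded once at startup and used across the tabs.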
all_models_words = load_all_models_words()
lemma_dict = load_lsj_dict()
models_for_word_dict = load_models_for_word_dict()
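
# Horizontal navigation bar for switching between the four tools.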
active_tab = option_menu(None, ["Nearest neighbours", "Cosine similarity", "3D graph", "Dictionary"],
                         menu_icon="cast", default_index=0, orientation="horizontal")
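
# Tab 1: nearest neighbours of a word in one or more time-slice models.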
if active_tab == "Nearest neighbours":
    eligible_models = ["Archaic", "Classical", "Hellenistic", "Early Roman", "Late Roman"]

    with st.container():
        st.markdown("## Nearest Neighbours")
        target_word = st.multiselect("Enter a word", options=all_models_words, max_selections=1)
        if len(target_word) > 0:
            target_word = target_word[0]
            # Only offer the time slices in which the selected word actually occurs.
            eligible_models = models_for_word_dict[target_word]

        models = st.multiselect(
            "Select models to search for neighbours",
            eligible_models
        )
        n = st.slider("Number of neighbours", 1, 50, 15)

        nearest_neighbours_button = st.button("Find nearest neighbours")

        if nearest_neighbours_button:
            if not validate_nearest_neighbours(target_word, n, models):
                st.error('Please fill in all fields')
            else:
                models = load_selected_models(models)
                nearest_neighbours = get_nearest_neighbours(target_word, n, models)

                all_dfs = []
                # One table of neighbours per selected model.
                for model in nearest_neighbours.keys():
                    st.write(f"### {model}")
                    df = pd.DataFrame(
                        nearest_neighbours[model],
                        columns=['Word', 'Cosine Similarity']
                    )
                    all_dfs.append((model, df))
                    st.table(df)

                # Bundle all result tables into one temporary file offered for download.
                tmp_file = store_df_in_temp_file(all_dfs)
                with open(tmp_file, "rb") as file:
                    file_byte = file.read()

                st.download_button(
                    "Download results",
                    data=file_byte,
                    file_name=f'nearest_neighbours_{target_word}_TEST.xlsx',
                    mime='application/octet-stream'
                )
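
# Tab 2: cosine similarity between two words, each taken from its own time slice.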
elif active_tab == "Cosine similarity":
    with st.container():
        eligible_models_1 = []
        eligible_models_2 = []
        st.markdown("## Cosine similarity")
        col1, col2 = st.columns(2)
        col3, col4 = st.columns(2)
        with col1:
            word_1 = st.multiselect("Enter a word", placeholder="πατήρ", max_selections=1, options=all_models_words)
            if len(word_1) > 0:
                word_1 = word_1[0]
                eligible_models_1 = models_for_word_dict[word_1]

        with col2:
            time_slice_1 = st.selectbox("Time slice word 1", options=eligible_models_1)

    with st.container():
        with col3:
            word_2 = st.multiselect("Enter a word", placeholder="μήτηρ", max_selections=1, options=all_models_words)
            if len(word_2) > 0:
                word_2 = word_2[0]
                eligible_models_2 = models_for_word_dict[word_2]

        with col4:
            time_slice_2 = st.selectbox("Time slice word 2", eligible_models_2)

    cosine_similarity_button = st.button("Calculate cosine similarity")

    if cosine_similarity_button:
        # Guard against an empty selection before computing the score.
        if not word_1 or not word_2:
            st.error('Please fill in all fields')
        else:
            cosine_similarity_score = get_cosine_similarity(word_1, time_slice_1, word_2, time_slice_2)
            st.write(cosine_similarity_score)
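
# Tab 3: 3D plot of a word and its nearest neighbour vectors in one time slice.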
elif active_tab == "3D graph":
    col1, col2 = st.columns(2)

    with st.container():
        with col1:
            word = st.multiselect("Enter a word", all_models_words, max_selections=1)
            if len(word) > 0:
                word = word[0]

        with col2:
            time_slice = st.selectbox("Time slice", ["Archaic", "Classical", "Hellenistic", "Early Roman", "Late Roman"])

        n = st.slider("Number of words", 1, 50, 15)

        graph_button = st.button("Create 3D graph")

        if graph_button:
            if not word:
                st.error('Please fill in all fields')
            else:
                # Resolve the time-slice name to its model and fetch the vectors of the
                # word's nearest neighbours in that model.
                time_slice_model = convert_time_name_to_model(time_slice)
                nearest_neighbours_vectors = get_nearest_neighbours_vectors(word, time_slice_model, n)

                st.dataframe(nearest_neighbours_vectors)

                fig, df = make_3d_plot4(nearest_neighbours_vectors, word, time_slice_model)

                st.dataframe(df)
                st.plotly_chart(fig)
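
# Tab 4: look up a word in the LSJ dictionary.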
elif active_tab == "Dictionary":
    with st.container():
        query_tag = st_tags(label='Search a word in the LSJ dictionary',
                            text='',
                            value=[],
                            suggestions=all_models_words,
                            maxtags=1,
                            key='1')

        if query_tag:
            st.write(f"### {query_tag[0]}")

            # Try the word as entered first, then with its first letter capitalised.
            data = None
            if query_tag[0] in lemma_dict:
                data = lemma_dict[query_tag[0]]
            elif query_tag[0].capitalize() in lemma_dict:
                data = lemma_dict[query_tag[0].capitalize()]
            else:
                st.error("Word not found in dictionary")

            if data is not None:
                text = format_text(data)
                st.markdown(text)