import streamlit as st
from streamlit_option_menu import option_menu
from word2vec import *
import pandas as pd
from autocomplete import *
from plots import *
from lsj_dict import *
import json
from streamlit_tags import st_tags, st_tags_sidebar


st.set_page_config(page_title="ἄγαλμα | AGALMA", layout="centered", page_icon="images/AGALMA_logo.png")

# Cache data
@st.cache_data
def load_lsj_dict():
    # Use a context manager so the file handle is closed after loading
    with open('lsj_dict.json', 'r') as f:
        return json.load(f)

@st.cache_data
def load_all_models_words():
    return sorted(load_compressed_word_list('corpora/compass_filtered.pkl.gz'), key=custom_sort)

@st.cache_data
def load_models_for_word_dict():
    return word_in_models_dict('corpora/compass_filtered.pkl.gz')


@st.cache_data
def load_all_lemmas():
    return load_compressed_word_list('all_lemmas.pkl.gz')

@st.cache_data
def load_lemma_count_dict():
    return count_lemmas('lemma_list_raw')
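
# For reference only, a minimal sketch (an assumption, not the actual code) of what
# a compressed-word-list loader like load_compressed_word_list, imported from
# word2vec above, might look like; the real implementation lives in word2vec.py:
#
#   import gzip, pickle
#
#   def load_compressed_word_list(path):
#       # read a gzip-compressed pickle holding a list of lemmas
#       with gzip.open(path, 'rb') as f:
#           return pickle.load(f)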

# Load compressed word list
all_models_words = load_all_models_words()

# Prepare lsj dictionary
lemma_dict = load_lsj_dict()

# Load dictionary with words as keys and eligible models as values
models_for_word_dict = load_models_for_word_dict()

lemma_counts = load_lemma_count_dict()



# Set styles for menu
styles_horizontal = {
    "container": {"display": "flex", "justify-content": "center"},
    "nav": {"display": "flex", "gap": "2px", "margin": "5px"},
    "nav-item": {"flex": "1", "font-family": "Helvetica"},
    "nav-link": {
        "background-color": "#f0f0f0",
        "border": "1px solid #ccc",
        "border-radius": "5px",
        "padding": "10px",
        "width": "150px",  
        "height": "60px",  
        "display": "flex",
        "align-items": "center",
        "justify-content": "center",
        "transition": "background-color 0.3s, color 0.3s",
        "color": "black",
        "text-decoration": "none"
    },
    "nav-link:hover": {
        "background-color": "rgb(238, 238, 238)",
        "color": "#000"
    },
    "nav-link-selected": {
        "background-color": "#B8E52B",
        "color": "white",
        "font-weight": "bold"
    },
    "icon": {"display": "None"}
}


styles_vertical = {
    "nav-link-selected": {
        "background-color": "#B8E52B",
        "color": "white",
        "font-weight": "bold"
    }
}

# Set vertical sidebar width to 350px
st.markdown(
    """
    <style>
        section[data-testid="stSidebar"] {
            width: 350px !important; /* set the sidebar width */
        }
    </style>
    """,
    unsafe_allow_html=True,
)


with st.sidebar:
    st.image('images/AGALMA_logo_v2.png')
    # st.markdown('# ἄγαλμα | AGALMA')
    selected = option_menu('ἄγαλμα | AGALMA', ["App", "About", "FAQ", "Subcorpora", "License"],
                           menu_icon="menu", default_index=0, orientation="vertical", styles=styles_vertical, icons=['house', 'file-person', 'question-square', 'book', 'file-earmark'])
    
if selected == "App":
    # Horizontal menu
    active_tab = option_menu(None, ["Nearest neighbours", "Cosine similarity", "3D graph", 'Dictionary'], 
        menu_icon="cast", default_index=0, orientation="horizontal", styles=styles_horizontal)


    # Adding CSS style to remove list-style-type
    st.markdown("""
    <style>
    /* Define a class to remove list-style-type */
    .no-list-style {
        list-style-type: none;
    }
    </style>
    """, unsafe_allow_html=True)



    # Nearest neighbours tab
    if active_tab == "Nearest neighbours":
        
        # All models in a list
        eligible_models = ["Archaic", "Classical", "Hellenistic", "Early Roman", "Late Roman"]
        all_models_words = load_all_models_words()
        
        with st.container():
            st.markdown("## Nearest Neighbours")
            st.markdown(
                    'Here you can extract the nearest neighbours to a chosen lemma. \
                    Please select one or more time slices and the preferred number of nearest neighbours. \
                    Only type in Greek, with correct breathings and accents.'
                )
            target_word = st.multiselect("Enter a word", options=all_models_words, max_selections=1)
            if len(target_word) > 0:
                target_word = target_word[0]
                
                eligible_models = models_for_word_dict[target_word]
            
            models = st.multiselect(
                "Select models to search for neighbours",
                eligible_models
                )
            n = st.slider("Number of neighbours", 1, 50, 15)
            
            nearest_neighbours_button = st.button("Find nearest neighbours")

        if nearest_neighbours_button:
            if not validate_nearest_neighbours(target_word, n, models):
                st.error('Please fill in all fields')
            else:
                # Rewrite models to list of all loaded models
                models = load_selected_models(models)
                
                nearest_neighbours = get_nearest_neighbours(target_word, n, models)
                
                all_dfs = []
                                
                # Create dataframes
                for model in nearest_neighbours.keys():
                    st.write(f"### {model}")
                    df = pd.DataFrame(
                        nearest_neighbours[model],
                        columns = ['Word', 'Cosine Similarity']
                    )
                    
                    # Add word occurrences to the dataframe; default to 0 if a lemma is missing from the counts
                    df['Occurrences'] = df['Word'].apply(lambda x: lemma_counts[model].get(x, 0))

                    all_dfs.append((model, df))
                    st.table(df)
                
                
                # Store content in a temporary file
                tmp_file = store_df_in_temp_file(all_dfs)
                
                # Open the temporary file and read its content
                with open(tmp_file, "rb") as file:
                    file_byte = file.read()
                    
                    # Create download button
                    st.download_button(
                        "Download results",
                        data=file_byte,
                        file_name = f'nearest_neighbours_{target_word}.xlsx',
                        mime='application/octet-stream'
                        )
                    

    # Cosine similarity tab
    elif active_tab == "Cosine similarity":
        all_models_words = load_all_models_words()
        
        with st.container():
            eligible_models_1 = []
            eligible_models_2 = []
            st.markdown("## Cosine similarity")
            st.markdown(
                    'Here you can extract the cosine similarity between two lemmas. \
                    Please select a time slice for each lemma. \
                    You can also calculate the cosine similarity between two vectors of the same lemma in different time slices. \
                    Only type in Greek, with correct breathings and accents.'
                )
            col1, col2 = st.columns(2)
            col3, col4 = st.columns(2)
            with col1:
                word_1 = st.multiselect("Enter a word", placeholder="πατήρ", max_selections=1, options=all_models_words)
                if len(word_1) > 0:
                    word_1 = word_1[0]
                    eligible_models_1 = models_for_word_dict[word_1]
                    
            with col2:
                time_slice_1 = st.selectbox("Time slice word 1", options = eligible_models_1)


            with st.container():
                with col3:
                    word_2 = st.multiselect("Enter a word", placeholder="μήτηρ", max_selections=1, options=all_models_words)
                    if len(word_2) > 0:
                        word_2 = word_2[0]
                        eligible_models_2 = models_for_word_dict[word_2]
                    
                with col4:
                    time_slice_2 = st.selectbox("Time slice word 2", eligible_models_2)
        
            # Create button for calculating cosine similarity
            cosine_similarity_button = st.button("Calculate cosine similarity")
        
        # If the button is clicked, execute calculation
        if cosine_similarity_button:
            cosine_similarity_score = get_cosine_similarity(word_1, time_slice_1, word_2, time_slice_2)
            st.markdown(f'''<span style="font-size: 24px"> The Cosine Similarity between {word_1} ({time_slice_1}) and {word_2} ({time_slice_2}) is: **{cosine_similarity_score}**</span>''', unsafe_allow_html=True)

    # 3D graph tab
    elif active_tab == "3D graph":
        st.markdown("## 3D graph")
        st.markdown('''
                    Here you can generate a 3D representation of the semantic space surrounding a target lemma. Please choose the lemma and the time slice.\
                    Only type in Greek, with correct breathings and accents. \
                    
                    
                    **NB**: the 3D representations are reductions of the multi-dimensional representations created by the models. \
                    This is necessary for visualization, but some information gets lost when the dimensions are reduced. \
                    The 3D representations are thus not 100% accurate. For more information, please consult the FAQ.
                    ''')
        
        col1, col2 = st.columns(2)
        
        # Load compressed word list
        all_models_words = load_all_models_words()
        
        with st.container():
            eligible_models = []
            with col1:
                word = st.multiselect("Enter a word", all_models_words, max_selections=1)
                if len(word) > 0:
                    word = word[0]
                    eligible_models = models_for_word_dict[word]
                
            with col2:
                time_slice = st.selectbox("Time slice", eligible_models)

            n = st.slider("Number of words", 1, 50, 15)

            graph_button = st.button("Create 3D graph")
            
            if graph_button:
                time_slice_model = convert_time_name_to_model(time_slice)
                nearest_neighbours_vectors = get_nearest_neighbours_vectors(word, time_slice_model, n)
                
                fig, df = make_3d_plot_tSNE(nearest_neighbours_vectors, word, time_slice_model)            
                
                st.plotly_chart(fig)
                
                
                
            
                
    # Dictionary tab
    elif active_tab == "Dictionary":
        
        with st.container():
            st.markdown('## Dictionary')
            st.markdown('Search a word in the Liddell-Scott-Jones dictionary (only Greek, no whitespaces).')


            all_lemmas = load_all_lemmas()
            
            # query_word = st.multiselect("Search a word in the LSJ dictionary", all_lemmas, max_selections=1)
            
            query_tag = st_tags(label='',
                                text = '',
                                value = [],
                                suggestions = all_lemmas,
                                maxtags = 1,
                                key = '1'
                                )
            
            # If a word has been selected by user
            if query_tag:
                
                # Display word information
                if query_tag[0] in lemma_dict:
                    st.write(f"### {query_tag[0]}")
                    data = lemma_dict[query_tag[0]]
                elif query_tag[0].capitalize() in lemma_dict: # Some words are capitalized in the dictionary
                    st.write(f"### {query_tag[0].capitalize()}")
                    data = lemma_dict[query_tag[0].capitalize()]
                else:
                    st.error("Word not found in dictionary")
                    st.stop()  # end this Streamlit run gracefully instead of killing the process
                
                # Put text in readable format and render it once
                text = format_text(data)
                st.markdown(text, unsafe_allow_html=True)
                
                
                
                st.markdown("""
                            <style>
                            .tab {
                                display: inline-block;
                                margin-left: 4em;
                            }
                            .tr {
                                font-weight: bold;
                            }
                            .list-class {
                                list-style-type: none;
                                margin-top: 1em;
                            }
                            .primary-indicator {
                                font-weight: bold;
                                font-size: x-large;
                            }
                            .secondary-indicator {
                                font-weight: bold;
                                font-size: large;
                            }
                            .tertiary-indicator {
                                font-weight: bold;
                                font-size: medium;
                            }
                            .quaternary-indicator {
                                font-weight: bold;
                                font-size: medium;
                            }
                            .primary-class {
                                padding-left: 2em;
                            }
                            .secondary-class {
                                padding-left: 4em;
                            }
                            .tertiary-class {
                                padding-left: 6em;
                            }
                            .quaternary-class {
                                padding-left: 8em;
                            }
                            </style>
                            """, unsafe_allow_html=True)
                        

            
            
if selected == "About":
    st.markdown("""
        ## About
        Welcome to AGALMA | ἄγαλμα, the Ancient Greek Accessible Language Models for linguistic Analysis!        
        
        This interface was developed in the framework of Silvia Stopponi’s PhD project, \
        supervised by Saskia Peels-Matthey and Malvina Nissim at the University of Groningen (The Netherlands). \
        The aim of this tool is to make language models trained on Ancient Greek available to all interested people, regardless of their coding skills. \
            
        The following people were involved in the creation of this interface:
        
        **Mark den Ouden** developed the interface.
        
        **Silvia Stopponi** trained the models, defined the structure of the interface, and wrote the textual content.
        
        **Saskia Peels-Matthey** supervised the project and revised the structure of the interface and the textual content.
        
        **Malvina Nissim** supervised the project.
        
        **Anchoring Innovation** financially supported the creation of this interface. \
        Anchoring Innovation is the Gravitation Grant research agenda of the Dutch National Research School in Classical Studies, OIKOS. \
        It is financially supported by the Dutch Ministry of Education, Culture and Science (NWO project number 024.003.012).
        
        <div style="text-align: center; font-weight: bold;">How to cite</div>
        
        If you use this interface for your research, please cite it as:

        Stopponi, Silvia, Mark den Ouden, Saskia Peels-Matthey & Malvina Nissim. 2024. \
        <span style="font-style: italic;">AGALMA: Ancient Greek Accessible Language Models for linguistic Analysis.</span>
        
        """, unsafe_allow_html=True)
 

if selected == "FAQ":
    st.markdown("""
        ## FAQ
        """)
    
    

    with st.expander(r"$\textsf{\Large What is this interface based on?}$"):
        st.write(
                "This interface is based on language models. Language models are probability distributions of \
                words or word sequences, which store statistical information about word co-occurrences. \
                This happens during the training phase, in which models process a corpus of texts in the \
                target language(s). Once trained, linguistic information can be extracted from the models, or \
                the models can be used to perform specific linguistic tasks. In this interface, we focus on the \
                extraction of semantic information. To that end, we created five models, corresponding to five \
                time slices. The models on which this interface is based are so-called Word Embedding \
                models (the specific architecture is called Word2Vec)."
                )
        
    with st.expander(r"$\textsf{\Large What are Word Embeddings?}$"):
        st.write(
            "Word Embeddings are representations of words obtained via language modelling. More in \
            detail, they are strings of numbers (called *vectors*) produced by a language model to \
            represent each word in the training corpus in a multi-dimensional space. Words that are more \
            similar in meaning will be closer to one another in this vector space (or semantic space) than \
            words that are less similar in meaning. The term *word embeddings* is often used as a \
            synonym of *predict models*, a type of language models introduced by Mikolov *et al.* (2013) \
            with the Word2Vec architecture. This interface is built upon Word2Vec models."
        )
        
    with st.expander(r"$\textsf{\Large Which corpus was used to train the models?}$"):
        st.markdown('''
            The five models on which this interface is based were trained on five diachronic slices of the \
            Diorisis Ancient Greek Corpus, which is ‘a digital collection of ancient Greek texts (from \
            Homer to the early fifth century AD) compiled for linguistic analyses’ (Vatri & McGillivray
            2018: 55). The Diorisis corpus contains a subset of the texts that can be found in the \
            Thesaurus Linguae Graecae. More information about the works and authors included in each \
            subcorpus is provided in the 'Subcorpora' tab in the menu on the left.'''
        , unsafe_allow_html=True)
        
        
        
    with st.expander(r"$\textsf{\Large How was the corpus divided into time slices?}$"):
        st.write(
            "The texts in the corpus were divided according to chronology. We tried to strike a balance \
            between respecting the traditional divisions of Ancient Greek literature into periods and \
            having slices of a more or less comparable size. The division is the following:\
            \n\nArchaic: beginning-500 BCE; Classical: 499-324 BCE; Hellenistic: 323-31 BCE; Early Roman: \
            30 BCE-250 CE; Late Roman: 251-500 CE."
        )
    
    
    with st.expander(r"$\textsf{\small Which are the theoretical assumptions behind distributional semantic models, such as Word Embeddings?}$"):
        st.write(
            "Computational semantics is based on the Distributional Hypothesis. According to this \
            hypothesis, words used in similar lexical contexts (contexts of words surrounding them) will \
            have a similar meaning. This hypothesis was famously summarized by J.R. Firth as ‘you \
            shall know a word by the company it keeps’ (1957: xx). Phrased differently, this \
            means that two words that occur in similar lexical contexts are probably semantically \
            related. The words that occur in the most similar lexical contexts are referred to as \
            nearest neighbours. This does not necessarily mean, though, that these words ever \
            occur together. A detailed introduction to distributional semantics can be found in the book \
            *Distributional Semantics* (Lenci & Sahlgren 2023: 3-25)."
        )
        
    with st.expander(r"$\textsf{\Large What are the nearest neighbours?}$"):
        st.write(
            "Word vectors can be used as coordinates to represent words in a geometric space, called \
            *semantic space*. Words with similar vectors, occurring in similar contexts, are closer in the \
            space. The nearest neighbours to a word are the closest words to it in the semantic space. \
            Words close in the space are not necessarily synonyms; rather, they are in a relationship of \
            semantic relatedness, i.e. they belong to the same semantic area. An example of neighbours \
            in the space could be: *star – moon – sun – cloud – plane – fly – blue*."
        )
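
    # Illustrative sketch only (kept as a comment; not part of the app's code path).
    # Nearest neighbours are the vocabulary items whose vectors have the highest
    # cosine similarity to the target's vector. Assuming a dict `vectors` mapping
    # each word to a numpy array, a minimal version could look like:
    #
    #   import numpy as np
    #
    #   def nearest_neighbours(target, vectors, n=15):
    #       t = vectors[target]
    #       sims = {w: np.dot(t, v) / (np.linalg.norm(t) * np.linalg.norm(v))
    #               for w, v in vectors.items() if w != target}
    #       return sorted(sims.items(), key=lambda kv: kv[1], reverse=True)[:n]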
        
    with st.expander(r"$\textsf{\Large Are the nearest neighbours the same as concordances?}$"):
        st.write(
            "No. The nearest neighbours to a target word do not necessarily occur together with it in the \
            same context, but each of them will be found in similar lexical contexts. For example, my \
            colleague Pete and I may often go to the same type of conferences and meet the same \
            group of people there, but it is quite possible that Pete and I never go to the same \
            conference at the same time. Pete and I are similar, but not necessarily spending time \
            together. The extraction of the nearest neighbours with word embeddings is thus different \
            from finding concordances. The nearest neighbours cannot be extracted manually with \
            close-reading methods."
        )
        
    with st.expander(r"$\textsf{\Large Which framework and parameters were used to train the models?}$"):
        st.write(
            "The Word2vec models were trained by using the CADE framework (Bianchi *et al.* 2020), a \
            technique which does not require space alignment, i.e. word embeddings trained on different \
            corpus slices are directly comparable. CADE was used with the following parameters: \
            size=30, siter=5, diter=5, workers=4, sg=0, ns=20. The chosen architecture was \
            Continuous Bag-of-Words (CBOW). The context taken into account for each word consists of \
            the 5 words before and the 5 words after the target word."
        )
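
    # Illustrative sketch only: roughly equivalent plain-gensim settings for the
    # parameters listed above (the actual training used CADE, which wraps Word2Vec
    # and aligns the slices; this is an approximation, not the training script):
    #
    #   from gensim.models import Word2Vec
    #
    #   model = Word2Vec(sentences,        # tokenized corpus slice
    #                    vector_size=30,   # size=30
    #                    window=5,         # 5 words before and after
    #                    sg=0,             # CBOW architecture
    #                    negative=20,      # ns=20
    #                    epochs=5,         # siter/diter=5
    #                    workers=4)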
        
    with st.expander(r"$\textsf{\Large What is the cosine similarity value?}$"):
        st.write(
            "The cosine similarity is a measure of the distance between two words in the semantic space. \
            More precisely, the cosine similarity is the cosine of the angle between the two vectors in the \
            multi-dimensional space. The value ranges from -1 to 1. The higher the value of the cosine \
            similarity (the closer it is to 1), the closer two words are in the semantic space. For example, \
            according to our model, the cosine similarity value of πατήρ and μήτηρ in the Classical period \
            is 0.93, relatively high, as we might expect for these obviously related words, while the \
            cosine similarity value of a random pair like πατήρ and τράπεζα in the same time slice is \
            0.12, considerably lower."
        )
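
    # Illustrative sketch only: the cosine similarity described above, for two
    # numpy vectors v1 and v2:
    #
    #   import numpy as np
    #
    #   def cosine_similarity(v1, v2):
    #       return float(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))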
   
    with st.expander(r"$\textsf{\Large What are the 3D representations?}$"):
        st.write(
           "The 3D representation is a way to graphically visualize the semantic space, the method used \
            on this website is called t-SNE. Semantic spaces are multi-dimensional, with as many \
            dimensions as the digits in the vectors. The embeddings used for this interface only have 30 \
            dimensions. A 3D representation reduces the dimensions to 3, to allow for graphic \
            representation. Even if 3D representations are effective means of making a semantic space \
            visible, **they are not 100% accurate**, since the visualization shows a reduction of the 30 \
            dimensions. We thus advise not to base any conclusions on the graphic representation only, \
            but to rely on nearest neighbours extraction and on cosine similarity."
       )
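
    # Illustrative sketch only: reducing 30-dimensional embeddings to 3 dimensions
    # with t-SNE, in the spirit of the 3D graph tab (assuming `vectors` is an
    # (n_words, 30) numpy array; the perplexity value here is hypothetical):
    #
    #   from sklearn.manifold import TSNE
    #
    #   coords_3d = TSNE(n_components=3, perplexity=5).fit_transform(vectors)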
        
    with st.expander(r"$\textsf{\Large Is the information stored by Word Embeddings reliable?}$"):
        st.write(
            "The information stored in word embeddings is solely based on the training corpus. This \
            means that our models have no additional knowledge of the Ancient Greek language and \
            culture. All information extracted from a model thus reflects word co-occurrences, and word \
            meaning, in its specific training corpus.\
            \n\nPlease take into account that the results for words occurring very rarely may be inaccurate. \
            Language modelling works on a statistical basis, so that a word with only a few occurrences \
            may not provide enough evidence to obtain reliable results. But it has been observed that an \
            extremely high word frequency can also affect the results. It often happens that the nearest \
            neighbours to words occurring very often are other high-frequency words, such as stop \
            words (e.g., prepositions, articles, particles). "
        )
        
    with st.expander(r"$\textsf{\Large What if I obtain 'strange' results?}$"):
        st.write(
            "For the abovementioned reasons mentioned, word embeddings are not always reliable \
            methods of semantic investigation. Interpretation of the results is always needed to decide \
            whether the results at hand are real patterns present in the corpus, and could thus reveal \
            interesting phenomena, or just noise present in the data."
        )
   
    with st.expander(r"$\textsf{\Large How can word embeddings help us study semantic change?}$"):
        st.write(
            "Cosine similarity can be computed between vectors of the same word in different time slices. \
            The higher the cosine similarity, the more similar the usage of a word is in the two considered \
            time slices. If the cosine similarity between a word’s vectors in two consecutive time slices is \
            particularly low, there is a chance that semantic change happened at that point in time. The \
            analysis of the nearest neighbours to the target word in the two slices can help clarify \
            whether change actually happened, and in which direction."
        )
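
    # Illustrative sketch only: tracking a lemma across two consecutive time slices
    # with the app's own helper (used in the Cosine similarity tab); the example
    # word is arbitrary:
    #
    #   score = get_cosine_similarity('λόγος', 'Classical', 'λόγος', 'Hellenistic')
    #   # a low score between consecutive slices may hint at semantic change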
    
    st.markdown("""
    ## References
    
    Bianchi, F., Di Carlo, V., Nicoli, P., & Palmonari, M. (2020). Compass-aligned distributional
    embeddings for studying semantic differences across corpora. *arXiv preprint
    arXiv:2004.06519*.
    
    Lenci, A., & Sahlgren, M. (2023). *Distributional semantics*. Cambridge University Press.
    
    Mikolov, T., Chen, K., Corrado, G., & Dean, J. (2013). Efficient estimation of word
    representations in vector space. *arXiv preprint arXiv:1301.3781*.

    Vatri, A., & McGillivray, B. (2018). The Diorisis ancient Greek corpus: Linguistics and
    literature. *Research Data Journal for the Humanities and Social Sciences*, 3(1), 55-65.
    """)
      
  
if selected == "Subcorpora":
    st.markdown("""
                ## Subcorpora

                | Time Slice               | Tokens     | Authors/Texts                                                                                                                                                                                                                                                                                                 |
                |--------------------------|------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
                | Archaic (Homer-500 BCE)  | 229,999    | Homer, Hesiod, *Shield of Heracles*, 34 Homeric hymns.                                                                                                                                                                                                                                                           |
                | Classical (499-324 BCE)  | 2,628,193  | Andocides, Aeneas Tacticus, Antiphon, Aeschines, Aeschylus, Aristophanes, Aristotle, Demosthenes, Demades, Euripides, Herodotus, Hippocrates, Hyperides, Isaeus, Isocrates, Lycurgus, Lysias, Pindar, Plato, Sophocles, Thucydides, Xenophon.                          |
                | Hellenistic (323-31 BCE) | 1,471,917  | Apollonius Rhodius, Aratus, Asclepiodotus, Callimachus, Bion of Phlossa, Demetrius, *Against Dionysodorus* (Demosthenes), Dinarchus, Diodorus, Euclides, Hyperides, Moschus, Lycophron, Septuaginta, Polybius, Theocritus, Theophrastus.                                   |
                | Early Roman (30 BCE-250 CE) | 4,900,879 | Achilles Tatius, Aelian, Appian, Agathemerus, Aelius Aristides, Aretaeus, Arrian, Athenaeus, Barnabas, Cassius Dio, Clement of Alexandria, Claudius Ptolemy, Chariton, Dio Chrysostom, Diogenes Laertius, Dionysius of Halicarnassus, Epictetus, Flavius Josephus, Harpocration, Galen, Lucian, Longinus, Longus, New Testament, Marcus Aurelius, Oppian, Oppian of Apamaea, Onasander, Philostratus the Athenian, Philostratus the Younger, Parthenius of Nicaea, Pausanias, Philostratus of Lemnos, Plutarch, Pseudo Apollodorus, Pseudo-Aristides, Pseudo-Plutarch, *Second Alcibiades*, Strabo, Triphiodorus, Xenophon of Ephesus. |
                | Late Roman (251-500 CE)  | 753,907    | Callistratus, Basilius, Eusebius of Caesarea, Julian the Emperor, Nonnus, Plotinus, Quintus Smyrnaeus.                                                                                                                                                                                                        |

                """, unsafe_allow_html=True)
    

if selected == "License":
    st.markdown("""
        ## License
        The cosine similarity, nearest neighbours, and 3D representation data are licensed under a CC BY License.

        The LSJ dictionary has a CC BY-SA license and comes from the Unicode version of the dictionary produced by \
        [Giuseppe G. A. Celano](%s). The original (Betacode) version is provided under a CC BY-SA license by the [Perseus Digital Library](https://www.perseus.tufts.edu/). \
        Data available at https://github.com/PerseusDL/lexica/.
        """ % 'https://github.com/gcelano/LSJ_GreekUnicode?tab=readme-ov-file')
            
            

streamlit_style = """
            <style> 
            html, body {
                font-family: 'Helvetica';
            }
            </style>
            """
            
st.markdown(streamlit_style, unsafe_allow_html=True)