import base64

import streamlit as st
import spacy
from streamlit_echarts import st_echarts
from annotated_text import annotated_text

st.set_page_config(
    page_title="LeetSpeak-NER",
    page_icon=":mega:",
    layout="wide",
    initial_sidebar_state="expanded",
    # Placeholder menu links taken from the Streamlit docs example.
    menu_items={
        'Get Help': 'https://www.extremelycoolapp.com/help',
        'Report a bug': "https://www.extremelycoolapp.com/bug",
        'About': "# This is a header. This is an *extremely* cool app!"
    }
)


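# Cache the loaded spaCy pipelines so they are built once and reused across Streamlit reruns.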
@st.cache(show_spinner=False, allow_output_mutation=True, suppress_st_warning=True)
def load_models():
    spanish_model = spacy.load("./spacy-models/toy_output_es_blank/model-best/")
    english_model = spacy.load("./spacy-models/toy_output_en_blank/model-best/")
    models = {"English": english_model, "Spanish": spanish_model}
    return models


# Entity labels produced by the NER models and the highlight colour used for each.
ENTITY_COLORS = {
    "INV_CAMO": "#faa",
    "LEETSPEAK": "#fda",
    "MIX": "#afa",
    "PUNCT_CAMO": "#aaaaff",
}


def process_text(doc, selected_multi_ner):
    """Turn a spaCy Doc into the tuples/strings expected by annotated_text."""
    tokens = []
    for token in doc:
        if token.ent_type_ in ENTITY_COLORS:
            if selected_multi_ner == "Yes":
                # Show each camouflage type with its own label and colour.
                tokens.append((token.text, token.ent_type_, ENTITY_COLORS[token.ent_type_]))
            else:
                # Collapse all camouflage types into a single generic label.
                tokens.append((token.text, "CAMOUFLAGE", "#ffd5aa"))
        else:
            tokens.append(" " + token.text + " ")

    return tokens


# Sidebar controls: model language and whether entities are split by camouflage type.
selected_language = st.sidebar.selectbox("Select a language", options=["English", "Spanish"])
selected_multi_ner = st.sidebar.radio('Do you want to break down the detected entities by type of leetspeak?', ['Yes', 'No'])

models = load_models()
selected_model = models[selected_language]

LOGO_IMAGE = "aida_logo.png"

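# Inline CSS so the logo image floats to the right of the title column.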
st.markdown(
    """
    <style>
    .container {
        display: flex;

    }
    .logo-img {
        float:right;
        margin-top: 2.2em;
        margin-left: -10em;
    }
    </style>
    """,
    unsafe_allow_html=True
)


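# Page header: app title on the left, AI+DA logo on the right.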
col1, col2 = st.columns([4, 1])
with col1: 
    st.markdown("""
    <style>
    .big-font {
        font-size:3em;
        font-weight: bold;
    }
    </style>
    """, unsafe_allow_html=True)

    st.markdown('<p class="big-font">Welcome to <font color="#4B8BBE">Leet</font><font color="#FFE873">Speak</font><font color="#ff73a2">-NER</font></p>', unsafe_allow_html=True)
with col2:
    # st.image('./aida_logo.png')
    st.markdown(
        f"""
        <div class="container">
            <img class="logo-img" src="data:image/png;base64,{base64.b64encode(open(LOGO_IMAGE, "rb").read()).decode()}">
        </div>
        """,
        unsafe_allow_html=True
    )



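# Collapsible description of the project, the model, and the supported languages.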
with st.expander("Project Description", expanded=False):
    st.write("""
        Developed by the Applied Intelligence and Data Analysis ([AI+DA](http://aida.etsisi.upm.es/)) group at the Polytechnic University of Madrid (UPM).
        This tool uses a spaCy transformer Named Entity Recognition model to detect camouflaged words. Word camouflage is currently used to evade content moderation on social media, so the aim of this tool is to counter the new forms of misinformation that emerge on social media platforms.

        Currently, two languages are supported: English and Spanish. Additionally, you can select whether the detected entities are broken down by type of word camouflage: canonical leetspeak, punctuation camouflage, and inversion camouflage.
    """)
    
    
    
    

st.subheader("Input Text")   

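# Input form: a free-text area plus an optional plain-text file upload.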
with st.form("my_form"):
    text_input = st.text_area('Insert text to detect leetspeak entities. Try, for example: "@#plan#demia, pl@πd€m1∆ instead of “pandemia” (pandemic)"',
                              # placeholder="@#plan#demia, pl@πd€m1∆ instead of “pandemia” (pandemic)", 
                              # value="@#plan#demia, pl@πd€m1∆ instead of “pandemia” (pandemic)"
                             )

    # Only plain-text uploads can be decoded directly; .doc/.docx/.pdf would need dedicated parsers.
    uploaded_file = st.file_uploader("or Upload a file", type=["txt"])
    if uploaded_file is not None:
        text_input = uploaded_file.getvalue().decode("utf-8")
    
    # Every form must have a submit button.
    submitted = st.form_submit_button("Submit")
       
    

    
st.subheader("Output")
doc = selected_model(text_input)
tokens = process_text(doc, selected_multi_ner)

annotated_text(*tokens)