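# Streamlit app that visualizes how a selected tokenizer splits a snippet of text/code into tokens.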
import streamlit as st
from tokenizers.tools import EncodingVisualizer
from transformers import AutoTokenizer

st.set_page_config(page_title="BigCode Tokenizer", page_icon='👩‍💻', layout="wide")

# Tokenizers to compare; each must be loadable via AutoTokenizer.from_pretrained.
models = [
    "bigcode/tokenizer-the-stack-march-sample-v3-no-prefix-spaces",
    "bigcode/tokenizer-the-stack-march-sample-v3",
    "bigcode/tokenizer-the-stack-march-sample-v2",
    "bigcode/tokenizer-the-stack-march-sample",
    "bigcode/santacoder",
    "bigcode/digit-bytelevel-bpe-jss-v1.1-49152",
    "facebook/incoder-6B",
    "Salesforce/codegen-16B-mono"
]

# Cache one tokenizer per selected model. Passing the model name as an argument makes it
# part of the cache key, so switching models in the selectbox actually reloads the tokenizer;
# allow_output_mutation avoids re-hashing the tokenizer object on every rerun.
@st.cache(allow_output_mutation=True)
def load_tokenizer(model_name):
    return AutoTokenizer.from_pretrained(model_name)

col1, col2 = st.columns([1, 2])
with col1:
    selected_model = st.selectbox("Select a tokenizer", models, key=1)

text = st.text_area(label="", placeholder="Text to tokenize")
button_clicked = st.button("Tokenize")

tokenizer = load_tokenizer(selected_model)
# EncodingVisualizer expects the underlying `tokenizers.Tokenizer`, exposed through the fast tokenizer's `_tokenizer` attribute.
visualizer = EncodingVisualizer(tokenizer=tokenizer._tokenizer, default_to_notebook=False)

if text or button_clicked:
    st.write(f"The input was split into {len(tokenizer.tokenize(text))} tokens.")
    # Render the visualizer's HTML output inside the Streamlit page.
    st.components.v1.html(visualizer(text), height=1500)