import streamlit as st
from tokenizers.tools import EncodingVisualizer
from transformers import AutoTokenizer

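# Configure the Streamlit page before rendering any widgets.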
st.set_page_config(page_title="BigCode Tokenizer", page_icon='👩‍💻', layout="wide")

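# Tokenizers selectable in the app.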
models = ["bigcode/tokenizer", "facebook/incoder-6B", "Salesforce/codegen-16B-mono"]

# Cache the tokenizer so each model is only downloaded and loaded once;
# passing the model name as an argument keys the cache on the selection.
@st.cache()
def load_tokenizer(model_name):
    if model_name == "bigcode/tokenizer":
        tokenizer = AutoTokenizer.from_pretrained("bigcode/tokenizer", subfolder="digit-custom_punctuation-bytelevel-bpe-py-js-java-50k")
    else:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
    return tokenizer

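# Put the model picker in a narrow column; the wider second column is unused and only constrains its width.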
col1, col2 = st.columns([1, 2])
with col1:
    selected_model = st.selectbox("Select a tokenizer", models, key=1)

text = st.text_area(label="", placeholder="Text to tokenize")
button_clicked = st.button("Tokenize")

# EncodingVisualizer renders token boundaries as highlighted HTML; it expects the
# underlying `tokenizers.Tokenizer`, hence the private `_tokenizer` attribute.
tokenizer = load_tokenizer(selected_model)
visualizer = EncodingVisualizer(tokenizer=tokenizer._tokenizer, default_to_notebook=False)

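# Tokenize whenever the text area is non-empty or the button was clicked.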
if text or button_clicked:
    st.write(f"The input was split into {len(tokenizer.tokenize(text))} tokens.")
    st.components.v1.html(visualizer(text), height=1500)