# --- Hub page residue (kept for provenance, commented out so the file parses) ---
# cakiki's picture
# Update app.py
# 7a357c8
# raw · history · blame
# 1.19 kB
import streamlit as st
from tokenizers.tools import EncodingVisualizer
from transformers import AutoTokenizer
st.set_page_config(page_title="BigCode Tokenizer", page_icon='πŸ‘©β€πŸ’»', layout="wide")
models = [
"bigcode/tokenizer-the-stack-march-sample-v3-no-prefix-spaces",
"bigcode/tokenizer-the-stack-march-sample-v3",
"bigcode/tokenizer-the-stack-march-sample-v2",
"bigcode/tokenizer-the-stack-march-sample",
"bigcode/santacoder",
"bigcode/digit-bytelevel-bpe-jss-v1.1-49152",
"facebook/incoder-6B",
"Salesforce/codegen-16B-mono"
]
# allow_output_mutation=True: legacy st.cache otherwise hashes the returned
# tokenizer object on every rerun to detect mutation, which is slow and can
# fail for large, unhashable tokenizer internals.
@st.cache(allow_output_mutation=True)
def load_tokenizer(model_name=None):
    """Download (or fetch from cache) a tokenizer from the HuggingFace Hub.

    Args:
        model_name: Hub model ID to load. Defaults to the module-level
            ``selected_model`` chosen in the sidebar selectbox, preserving
            the original no-argument call sites. Passing the name explicitly
            is preferred: it makes the cache key depend on the model rather
            than on legacy st.cache's external-variable hashing.

    Returns:
        A ``transformers`` tokenizer instance for ``model_name``.
    """
    if model_name is None:
        model_name = selected_model
    return AutoTokenizer.from_pretrained(model_name)
# Two-column layout: controls on the left (1/3), visualization uses full width below.
# NOTE(review): original indentation was lost in extraction; the widget grouping
# under `with col1:` is reconstructed — confirm against the deployed app.
left_col, right_col = st.columns([1, 2])
with left_col:
    selected_model = st.selectbox("Select a tokenizer", models, key=1)
    input_text = st.text_area(label="", placeholder="Text to tokenize")
    tokenize_clicked = st.button("Tokenize")

# Cached load of the currently selected tokenizer.
tokenizer = load_tokenizer()
# EncodingVisualizer needs the underlying `tokenizers` object, not the
# transformers wrapper, hence the private `_tokenizer` attribute.
token_viz = EncodingVisualizer(tokenizer=tokenizer._tokenizer, default_to_notebook=False)

# Render on button press, or whenever there is text (so edits re-tokenize live).
if input_text or tokenize_clicked:
    num_tokens = len(tokenizer.tokenize(input_text))
    st.write(f"The input was split into {num_tokens} tokens.")
    st.components.v1.html(token_viz(input_text), height=1500)