# llama_tokenizer / app.py
# Source: Hugging Face Space by teragron — last change "Update app.py",
# commit db50e26 (file size 1.06 kB).
# Standard library imports first, then third-party (PEP 8 grouping).
import io
import json
import os  # FIX: os.system is used below but os was never imported (NameError)

import gradio as gr
from transformers import LlamaTokenizer

# Hugging Face Spaces workaround: force-pin the Gradio version at runtime.
# NOTE(review): gradio is already imported above, so the running process keeps
# the originally imported version until the Space restarts — presumably the
# Space restarts after install; confirm this is the intended mechanism.
os.system("pip uninstall -y gradio")
os.system("pip install gradio==4.9.0")
print("grd version:", gr.__version__)

# Load the tokenizer from the local "llama_tokenizer" folder shipped with the Space.
tokenizer = LlamaTokenizer.from_pretrained("llama_tokenizer")
def tokenize(input_text, file=None):
    """Count LLaMA tokens in the prompt text or in an uploaded text file.

    Args:
        input_text: Prompt text from the textbox; may be empty or ``None``.
        file: Optional filesystem path to an uploaded UTF-8 text file
            (Gradio ``File`` with ``type="filepath"``). When given, the file's
            contents take precedence over ``input_text``.

    Returns:
        int: Number of tokens produced by the tokenizer, excluding
        special tokens (BOS/EOS), so the count reflects the raw text only.
    """
    if file:
        # f.read() is the idiomatic equivalent of "".join(f.readlines()).
        with open(file, encoding="utf-8") as f:
            full_text = f.read()
    else:
        # Guard against None so encode() always receives a string.
        full_text = input_text or ""
    tokens = tokenizer.encode(full_text, add_special_tokens=False)
    return len(tokens)
# Build the Gradio UI: prompt textbox + file upload on one row,
# token-count output and a Run button below.
with gr.Blocks() as demo:
    # Page title.
    gr.Markdown(
        """
        # Token Counter for LLAMA
        """)
    with gr.Row():
        # The user supplies either a typed prompt or an uploaded file.
        prompt_box = gr.Textbox(placeholder="Enter prompt")
        upload = gr.File(label="Upload File", type="filepath")
    with gr.Column():
        token_count = gr.Textbox(label="Number of tokens")
        count_button = gr.Button("Run")
    # Wire the button to the tokenizer; output is the token count textbox.
    count_button.click(fn=tokenize, inputs=[prompt_box, upload], outputs=token_count)

demo.launch()