tamang0000 committed
Commit 85ba6af · 1 Parent(s): 2ae07a4
added assamese examples
Browse files:
- app-temp.py +178 -0
- app.py +14 -24
- data/multilingualphrases01-as.txt +6 -0
app-temp.py
ADDED
@@ -0,0 +1,178 @@
+import logging
+# import os
+import tiktoken
+from transformers import AutoTokenizer
+
+import gradio as gr
+
+logger = logging.getLogger(__name__)  # noqa
+
+# Hugging Face login (disabled)
+# hf_token = os.getenv('HUGGINGFACE_TOKEN')
+# HfApi().login(token=hf_token)
+
+def load_test_phrases(filename):
+    # Read one test phrase per line from ./data/<filename>.
+    with open(f"./data/{filename}", "r", encoding="utf-8") as file:
+        return file.read().splitlines()
+
+
+models = ["Xenova/claude-tokenizer", # Anthropic
+          "meta-llama/Llama-2-7b-chat-hf", # LLAMA-2
+          "beomi/llama-2-ko-7b", # LLAMA-2-ko
+          "ai4bharat/Airavata", # Airavata
+          "openaccess-ai-collective/tiny-mistral", # Mistral
+          "gpt-3.5-turbo", # GPT3.5
+          "meta-llama/Meta-Llama-3-8B-Instruct", # LLAMA-3
+          "CohereForAI/aya-23-8B", # AYA
+          "google/gemma-1.1-2b-it", # GEMMA
+          "gpt-4o", # GPT4o
+          "TWO/sutra-mlt256-v2", # SUTRA
+          "tamang0000/assamese-tokenizer-50k" # Assamese
+          ]
+
+test_phrase_set = [
+    "I am going for a walk later today",  # English
+    "நாங்கள் சந்திரனுக்கு ராக்கெட் பயணத்தில் இருக்கிறோம்",  # Tamil
+
+    "중성자 산란을 다섯 문장으로 설명해주세요",  # Korean
+
+    "मुझे पाँच वाक्यों में न्यूट्रॉन प्रकीर्णन की व्याख्या दीजिए",  # Hindi
+    "mujhe paanch vaakyon mein nyootron prakeernan kee vyaakhya deejie",
+
+    "আমাকে পাঁচটি বাক্যে নিউট্রন বিচ্ছুরণের একটি ব্যাখ্যা দিন",  # Bengali/Bangla
+    "Amake pamcati bakye ni'utrana bicchuranera ekati byakhya dina",
+
+    "મને પાંચ વાક્યોમાં ન્યુટ્રોન સ્કેટરિંગની સમજૂતી આપો",  # Gujarati
+    "Mane panca vakyomam n'yutrona sketaringani samajuti apo",
+
+    "நியூட்ரான் சிதறல் பற்றிய விளக்கத்தை ஐந்து வாக்கியங்களில் கொடுங்கள்",  # Tamil
+    "Niyutran citaral parriya vilakkattai aintu vakkiyankalil kotunkal",
+
+    "मला पाच वाक्यात न्यूट्रॉन स्कॅटरिंगचे स्पष्टीकरण द्या",  # Marathi
+
+    "ఐదు వాక్యాలలో న్యూట్రాన్ స్కాటరింగ్ గురించి నాకు వివరణ ఇవ్వండి",  # Telugu
+]
+
+test_phrase_set_long_1 = load_test_phrases('multilingualphrases01.txt')
+test_phrase_set_long_2 = load_test_phrases('multilingualphrases02.txt')
+test_phrase_set_long_3 = load_test_phrases('multilingualphrases03.txt')
+
+
+def generate_tokens_as_table(text):
+    # Tokenize `text` with every model; one row per model: [model, token_1, token_2, ...].
+    table = []
+    for model in models:
+        if 'gpt' not in model:
+            tokenizer = AutoTokenizer.from_pretrained(model)
+            tokens = tokenizer.encode(text, add_special_tokens=False)
+        else:
+            tokenizer = tiktoken.encoding_for_model(model)
+            tokens = tokenizer.encode(text)
+        decoded = [tokenizer.decode([t]) for t in tokens]
+        table.append([model] + decoded)
+    return table
+
+
+def generate_tokenizer_table(text):
+    # Per-model vocabulary size, token count, and tokens-per-word ratio for `text`.
+    if not text:
+        return []
+
+    token_counts = {model: 0 for model in models}
+    vocab_size = {model: 0 for model in models}
+
+    for model in models:
+        if 'gpt' not in model:
+            tokenizer = AutoTokenizer.from_pretrained(model)
+            vocab_size[model] = tokenizer.vocab_size
+        else:
+            tokenizer = tiktoken.encoding_for_model(model)
+            vocab_size[model] = tokenizer.n_vocab
+
+        token_counts[model] += len(tokenizer.encode(text))
+
+    word_count = len(text.split(' '))
+
+    output = []
+    for m in models:
+        row = [m, vocab_size[m], word_count, token_counts[m], f"{token_counts[m] / word_count:0.2f}"]
+        output.append(row)
+
+    return output
+
+
+def generate_split_token_table(text):
+    # Wrap the per-model statistics in a gr.Dataframe for display.
+    if not text:
+        return gr.Dataframe()
+
+    table = generate_tokenizer_table(text)
+    return gr.Dataframe(
+        table,
+        headers=['tokenizer', 'vocab size', '#words', '#tokens', '#tokens/word'],
+        datatype=["str", "number", "number", "number", "str"],  # one datatype per column
+        row_count=len(models),
+        col_count=(5, "fixed"),
+    )
+
+
+with gr.Blocks() as sutra_token_count:
+    gr.Markdown(
+        """
+        # Multilingual Tokenizer Specs & Stats.
+        ## Tokenize paragraphs in multiple languages and compare token counts.
+        Space inspired by [SUTRA](https://huggingface.co/spaces/TWO/sutra-tokenizer-comparison)
+        """)
+    textbox = gr.Textbox(label="Input Text")
+    submit_button = gr.Button("Submit")
+    output = gr.Dataframe()
+    examples = [
+        [' '.join(test_phrase_set_long_1)],
+        [' '.join(test_phrase_set_long_2)],
+        [' '.join(test_phrase_set_long_3)],
+    ]
+    gr.Examples(examples=examples, inputs=[textbox])
+    submit_button.click(generate_split_token_table, inputs=[textbox], outputs=[output])
+
+
+def generate_tokens_table(text):
+    # Wrap the token breakdown in a gr.Dataframe; numbered columns are token positions.
+    table = generate_tokens_as_table(text)
+    cols = len(table[0])
+    return gr.Dataframe(
+        table,
+        headers=['model'] + [str(i) for i in range(cols - 1)],
+        row_count=2,
+        col_count=(cols, "fixed"),
+    )
+
+
+with gr.Blocks() as sutra_tokenize:
+    gr.Markdown(
+        """
+        # Multilingual Tokenizer Sentence Inspector.
+        ## Tokenize a sentence with various tokenizers and inspect how it's broken down.
+        Space inspired by [SUTRA](https://huggingface.co/spaces/TWO/sutra-tokenizer-comparison)
+        """)
+    textbox = gr.Textbox(label="Input Text")
+    submit_button = gr.Button("Submit")
+    output = gr.Dataframe()
+    examples = test_phrase_set
+    gr.Examples(examples=examples, inputs=[textbox])
+    submit_button.click(generate_tokens_table, inputs=[textbox], outputs=[output])
+
+
+if __name__ == '__main__':
+    with gr.Blocks(analytics_enabled=False) as demo:
+        with gr.Row():
+            gr.Markdown(
+                """
+                ## <img src="https://raw.githubusercontent.com/SAGAR-TAMANG/sagar-tamang-official-website-new/master/img/pi.jpg" height="20"/>
+                """
+            )
+        with gr.Row():
+            gr.TabbedInterface(
+                interface_list=[sutra_tokenize, sutra_token_count],
+                tab_names=["Tokenize Text", "Tokenize Paragraphs"]
+            )
+
+    demo.queue(default_concurrency_limit=5).launch(
+        server_name="0.0.0.0",
+        allowed_paths=["/"],
+    )
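
For reference, a minimal standalone sketch (not part of the commit) of the tokens-per-word statistic the table above reports, for a single model. It uses only tiktoken, so no Hugging Face authentication is needed, and assumes a tiktoken release recent enough (>= 0.7) to map "gpt-4o" to the o200k_base encoding:

import tiktoken

# One of the Assamese phrases added by this commit.
phrase = "আমি চন্দ্ৰলৈ ৰকেট যাত্ৰাত আছোঁ"

# Assumes tiktoken >= 0.7, which knows the "gpt-4o" model name.
enc = tiktoken.encoding_for_model("gpt-4o")
tokens = enc.encode(phrase)
words = phrase.split(' ')  # same whitespace split the app uses

print(f"vocab size:   {enc.n_vocab}")
print(f"#words:       {len(words)}")
print(f"#tokens:      {len(tokens)}")
print(f"#tokens/word: {len(tokens) / len(words):0.2f}")

A high tokens-per-word ratio here is exactly what the Space is built to surface: scripts that a tokenizer covers poorly fragment into many tokens per word.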
app.py
CHANGED
@@ -22,7 +22,7 @@ models = ["Xenova/claude-tokenizer", # Anthropic
           "ai4bharat/Airavata", # Airavata
           "openaccess-ai-collective/tiny-mistral", # Mistral
           "gpt-3.5-turbo", # GPT3.5
-
+          "meta-llama/Meta-Llama-3-8B-Instruct", # LLAMA-3
           "CohereForAI/aya-23-8B", # AYA
           "google/gemma-1.1-2b-it", # GEMMA
           "gpt-4o", # GPT4o
@@ -31,31 +31,21 @@ models = ["Xenova/claude-tokenizer", # Anthropic
 ]
 
 test_phrase_set = [
-    "I am going for a walk later today",
-    "நாங்கள் சந்திரனுக்கு ராக்கெட் பயணத்தில் இருக்கிறோம்",
+    "মই আজিৰ পাছত হ’ব লগা হাঁহিৰ বাবে ওলাই থাকিম",
+    "আমি চন্দ্ৰলৈ ৰকেট যাত্ৰাত আছোঁ",
 
-    "중성자 산란을 다섯 문장으로 설명해주세요", # Korean
+    "পাঁচখন বাক্যৰে নিউট্ৰন বিকিৰণৰ বৰ্ণনা দিয়ক", # Assamese
+    "আমাক পাঁচখন বাক্যৰে নিউট্ৰন বিকিৰণৰ বৰ্ণনা দিয়ক",
 
-    "मुझे पाँच वाक्यों में न्यूट्रॉन प्रकीर्णन की व्याख्या दीजिए", # Hindi
-    "mujhe paanch vaakyon mein nyootron prakeernan kee vyaakhya deejie",
-
-    "আমাকে পাঁচটি বাক্যে নিউট্রন বিচ্ছুরণের একটি ব্যাখ্যা দিন", # Bengali/Bangla
-    "Amake pamcati bakye ni'utrana bicchuranera ekati byakhya dina",
-
-    "મને પાંચ વાક્યોમાં ન્યુટ્રોન સ્કેટરિંગની સમજૂતી આપો", # Gujarati
-    "Mane panca vakyomam n'yutrona sketaringani samajuti apo",
-
-    "நியூட்ரான் சிதறல் பற்றிய விளக்கத்தை ஐந்து வாக்கியங்களில் கொடுங்கள்", # Tamil
-    "Niyutran citaral parriya vilakkattai aintu vakkiyankalil kotunkal",
-
-    "मला पाच वाक्यात न्यूट्रॉन स्कॅटरिंगचे स्पष्टीकरण द्या", # Marathi
-
-    "ఐదు వాక్యాలలో న్యూట్రాన్ స్కాటరింగ్ గురించి నాకు వివరణ ఇవ్వండి", # Telugu
+    "মোৰ বন্ধুটোৱে চাৰিটা পুথি পঢ়িছে", # Assamese
+    "মোৰ ঘৰখন গাঁওখনৰ আটাইতকৈ বেছি ডাঙৰ", # Assamese
+    "আজিৰে পৰা মই সৰু সৰু কামবোৰ কৰি থাকিম", # Assamese
+    "তেওঁৰ মাতবোৰ আৰু শাৰীবোৰ সলনি হোৱা দেখি চমক লাগিল", # Assamese
 ]
 
-test_phrase_set_long_1 = load_test_phrases('multilingualphrases01.txt')
-test_phrase_set_long_2 = load_test_phrases('multilingualphrases02.txt')
-test_phrase_set_long_3 = load_test_phrases('multilingualphrases03.txt')
+test_phrase_set_long_1 = load_test_phrases('multilingualphrases01-as.txt')
+# test_phrase_set_long_2 = load_test_phrases('multilingualphrases02.txt')
+# test_phrase_set_long_3 = load_test_phrases('multilingualphrases03.txt')
 
 
 def generate_tokens_as_table(text):
@@ -125,8 +115,8 @@ with gr.Blocks() as sutra_token_count:
     output = gr.Dataframe()
     examples = [
         [' '.join(test_phrase_set_long_1)],
-        [' '.join(test_phrase_set_long_2)],
-        [' '.join(test_phrase_set_long_3)],
+        # [' '.join(test_phrase_set_long_2)],
+        # [' '.join(test_phrase_set_long_3)],
     ]
     gr.Examples(examples=examples, inputs=[textbox])
     submit_button.click(generate_split_token_table, inputs=[textbox], outputs=[output])
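
As a quick sanity check of the new examples path, a minimal sketch (not part of the commit, run from the repo root) that loads the Assamese phrase file the same way app.py does; note that splitlines() keeps the file's blank second line as an empty entry:

def load_test_phrases(filename):
    # Mirrors the helper in app.py: one phrase (or blank line) per list entry.
    with open(f"./data/{filename}", "r", encoding="utf-8") as file:
        return file.read().splitlines()

phrases = load_test_phrases('multilingualphrases01-as.txt')
print(len(phrases))     # 6 entries, including the blank line
print(phrases[0][:40])  # start of the first Assamese paragraph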
data/multilingualphrases01-as.txt
ADDED
@@ -0,0 +1,6 @@
+কুকুৰ আৰু মেকুৰীৰ মুখ্য ভেদবোৰ হৈছে শাৰীৰিক বৈশিষ্ট্য, ব্যৱহাৰ আৰু গৃহপালিত কৰাৰ ইতিহাস। কুকুৰ সাধাৰণতে মেকুৰীৰ তুলনাত ডাঙৰ আৰু বিভিন্ন আকাৰ আৰু আকৃতিৰ হয়। তেওঁলোকে বিশ্বাসযোগ্যতা, সামাজিকতা আৰু প্ৰশিক্ষণ ল'ব পৰা সক্ষমতাৰ বাবে জনাজাত। আনহাতে, মেকুৰী সাধাৰণতে সৰু আৰু ক্ৰিয়াশীল। তেওঁলোকে অধিক সৰ্বস্বতন্ত্র আৰু কেতিয়াবা নিষ্ঠুৰ বুলি জনা যায়। লগতে, কুকুৰক হাজাৰ বছৰৰ পৰা গৃহপালিত কৰা হৈছে আৰু মানুহৰ সৈতে কাম কৰি অহা এক দীঘলীয়া ইতিহাস আছে, মেকুৰীৰ গৃহপালিত কৰাৰ ইতিহাস অধিক নতুন আৰু মূলত সঙ্গী হিচাপে ৰাখা হয়।
+
+অহা কালিৰ পৰা মই এটা নতুন অভ্যাস আৰম্ভ কৰিম। মোৰ ঘৰখনৰ চাৰিওফালে বহুতো সেউজীয়া গছ আছে। এই গছবোৰক মই নিয়মিত পানী দিম আৰু তেঁওলোকৰ যত্ন ল'ম। গছপত্ৰবোৰৰ সেউজীয়া বৰ্ণ মোৰ মন ভাল কৰি তোলে আৰু মোৰ মূৰত এক ধৰণৰ শান্তি আনে।
+মই জানো, আজি মোৰ কামবোৰ সৰলকৈ কৰিব লাগিব। প্রথমে, মই মোৰ পঢ়া পঢ়িম, তাৰ পাছত ঘৰখন পৰিষ্কাৰ কৰিম। তাৰ পাছত, মই ৰান্ধনী শেহ কৰি এক বিশেষ বঁটা খাম। আজিৰ দিনটো সফল আৰু আনন্দময় কৰি তুলিম।
+শৰীৰৰ যত্ন লোৱাটো অতি গুৰুত্বপূর্ণ। নিয়মিত ব্যায়াম আৰু সুষম আহাৰ গ্ৰহণে শৰীৰ সুস্থ আৰু সবল ৰাখে। আমি সকলোৰে উচিত আমাৰ শৰীৰৰ যত্ন ল'ব আৰু সুস্থ জীৱন যাপন কৰাটো। স্বাস্থ্যেই সুখৰ মূল।
+আজি আমাৰ পৰিয়ালে এটা বিশেষ উৎসৱ উদযাপন কৰিছে। ঘৰৰ সকলো সদস্য একেলগ হৈ থাকি এই উৎসৱত ভাগ লৈছে। নানা ধৰণৰ মিঠা আৰু পিঠা খাই, আনন্দেৰে দিনটো পাৰ কৰিছো। এই ধৰণৰ সময় আমাৰ পৰিয়ালৰ সম্পর্কক আৰু দৃঢ় কৰে।