BeardedMonster committed on
Commit
679d19d
·
verified ·
1 Parent(s): 4478401

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -52,7 +52,7 @@ st.title("SabiYarn-125M: Generates text in multiple Nigerian languages.")
52
 
53
  st.write("**Supported Languages: English, Yoruba, Igbo, Hausa, Pidgin, Efik, Urhobo, Fulfulde, Fulah. \nResults might not be coherent for less represented languages (i.e Efik, \
54
  Urhobo, Fulfulde, Fulah).**")
55
- # st.write("****")
56
  st.write("**For convenience, you can use chatgpt to copy text and evaluate model output.**")
57
  st.write("-" * 50)
58
 
@@ -146,10 +146,12 @@ user_input = instruction_wrap.get(sample_texts[user_input], sample_texts[user_in
146
  if st.button("Generate"):
147
  if user_input:
148
  try:
 
149
  st.write("**Generated Text Below:**")
150
  wrapped_input = wrap_text(user_input, task_value)
151
  print("wrapped_input: ", wrapped_input)
152
  generation_config["max_new_tokens"]= min(max_new_tokens, 1024 - len(tokenizer.tokenize(wrapped_input)))
 
153
  try:
154
  # Attempt the asynchronous API call
155
  generation_config["max_new_tokens"] = min(max_new_tokens, 1024 - len(tokenizer.tokenize(wrapped_input)))
@@ -163,7 +165,7 @@ if st.button("Generate"):
163
 
164
  full_output = st.empty()
165
 
166
- start_time = time.time()
167
  output = ""
168
  for next_token in tokenizer.tokenize(generated_text):
169
  output += tokenizer.convert_tokens_to_string([next_token])
 
52
 
53
  st.write("**Supported Languages: English, Yoruba, Igbo, Hausa, Pidgin, Efik, Urhobo, Fulfulde, Fulah. \nResults might not be coherent for less represented languages (i.e Efik, \
54
  Urhobo, Fulfulde, Fulah).**")
55
+ st.write("**It might take a while to return an output on the first 'generate' click.**")
56
  st.write("**For convenience, you can use chatgpt to copy text and evaluate model output.**")
57
  st.write("-" * 50)
58
 
 
146
  if st.button("Generate"):
147
  if user_input:
148
  try:
149
+
150
  st.write("**Generated Text Below:**")
151
  wrapped_input = wrap_text(user_input, task_value)
152
  print("wrapped_input: ", wrapped_input)
153
  generation_config["max_new_tokens"]= min(max_new_tokens, 1024 - len(tokenizer.tokenize(wrapped_input)))
154
+ start_time = time.time()
155
  try:
156
  # Attempt the asynchronous API call
157
  generation_config["max_new_tokens"] = min(max_new_tokens, 1024 - len(tokenizer.tokenize(wrapped_input)))
 
165
 
166
  full_output = st.empty()
167
 
168
+
169
  output = ""
170
  for next_token in tokenizer.tokenize(generated_text):
171
  output += tokenizer.convert_tokens_to_string([next_token])