# -*- coding: utf-8 -*-
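# Streamlit demo for Korean braille conversion: the snoop2head/KoBrailleT5-small-v1
# seq2seq model converts between entered Korean text and braille patterns.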
import numpy as np
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
st.set_page_config(
    page_title="", layout="wide", initial_sidebar_state="expanded"
)
@st.cache
def load_model(model_name):
    # cache the pretrained model so it is not reloaded on every Streamlit rerun
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    return model
tokenizer = AutoTokenizer.from_pretrained("snoop2head/KoBrailleT5-small-v1")
model = load_model("snoop2head/KoBrailleT5-small-v1")
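# Page UI: title, short description, and a text area pre-filled with a sample sentence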
st.title("한국어 점역과 역점역")  # "Korean braille transcription and back-transcription"
st.write("Braille Pattern Conversion")
default_value = '아스키 브랜드 블루진 타이핑'  # sample Korean input (loanwords)
src_text = st.text_area(
    "번역하고 싶은 문장을 입력하세요:",  # "Enter the sentence you want to translate:"
    default_value,
    height=300,
    max_chars=100,
)
print(src_text)
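# Warn when the input box is empty; otherwise run the conversion model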
if src_text == "":
    st.warning("Please **enter text** for translation")
else:
    # convert the input with beam search decoding
    translation_result = model.generate(
        **tokenizer(
            src_text,
            return_tensors="pt",
            padding="max_length",
            truncation=True,
            max_length=64,
        ),
        max_length=64,
        num_beams=5,
        repetition_penalty=1.3,
        no_repeat_ngram_size=3,
        num_return_sequences=1,
    )
    # decode the generated token ids into a plain string
    translation_result = tokenizer.decode(
        translation_result[0],
        clean_up_tokenization_spaces=True,
        skip_special_tokens=True,
    )
    print(f"{src_text} -> {translation_result}")
    st.write(translation_result)
    print(translation_result)