import streamlit as st
import numpy as np
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from normalizer import normalize  # Bengali text normalizer used with BanglaT5 (csebuetnlp/normalizer)
# Set the page configuration
st.set_page_config(
    page_title="Bengali to English Translator App",
    page_icon=":shield:",
    initial_sidebar_state="auto"
)
# Load custom CSS styling
with open("assets/style.css") as f:
    st.markdown("<style>{}</style>".format(f.read()), unsafe_allow_html=True)
# Load the pre-trained tokenizer and model, cached so they are loaded only once per session
@st.cache_resource
def get_model():
    tokenizer = AutoTokenizer.from_pretrained("kazalbrur/BanglaEnglishTokenizerBanglaT5", use_fast=True)
    model = AutoModelForSeq2SeqLM.from_pretrained("kazalbrur/BanglaEnglishTranslationBanglaT5")
    return tokenizer, model
# Load the tokenizer and model
tokenizer, model = get_model()
# Add a header to the Streamlit app with custom CSS for black font color
st.markdown("
Bengali to English Translator
", unsafe_allow_html=True)
# Add placeholder text with custom CSS styling
st.markdown("Enter your Bengali text here", unsafe_allow_html=True)
# Text area for user input with label and height set to 250
user_input = st.text_area("Enter your Bengali text here", "", height=250, label_visibility="collapsed")
# Button for submitting the input
submit_button = st.button("Translate")
# Perform prediction when user input is provided and the submit button is clicked
if user_input and submit_button:
    input_ids = tokenizer(normalize(user_input), padding=True, truncation=True, max_length=128, return_tensors="pt").input_ids
    generated_tokens = model.generate(input_ids, max_new_tokens=128)
    decoded_tokens = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
    st.write(f"English Translation: {decoded_tokens}", unsafe_allow_html=True)