import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer

class StreamlitStreamer(TextStreamer):
    # Called by TextStreamer with each newly decoded chunk of text: append it to
    # session state and rewrite the output placeholder so the fixed email appears
    # token by token while generation is still running.
    def on_finalized_text(self, text: str, stream_end: bool = False):
        st.session_state['new_mail'] += text
        new_mail.write(st.session_state['new_mail'])

# Prefer the GPU when one is available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

@st.cache_resource  # assumed here: cache the model across Streamlit reruns so it loads only once
def load_model():
    return AutoModelForCausalLM.from_pretrained(
        "tomaszki/mail_fixer",
    ).to(device)

@st.cache_resource  # assumed here: cache the tokenizer as well
def load_tokenizer():
    return AutoTokenizer.from_pretrained("facebook/opt-125m")

model = load_model()
tokenizer = load_tokenizer()

st.title('Mail fixer')
mail = st.text_area('Enter your mail here')
# Placeholder for the streamed output; the streamer calls .write() on it repeatedly.
new_mail = st.empty()

if mail:
    # Reset the accumulated output, then generate the fixed email,
    # streaming tokens to the page as they are produced.
    st.session_state['new_mail'] = ''
    streamer = StreamlitStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    prompt = f'Original email:\n{mail}\nFixed email:\n'
    tokenized = tokenizer(prompt, return_tensors='pt').to(device)
    output = model.generate(**tokenized, max_new_tokens=1024, streamer=streamer)
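
# To try the app locally (a minimal sketch; assumes the file is saved as app.py and
# that streamlit, torch, and transformers are already installed):
#
#     streamlit run app.py
#
# Typing an email into the text area triggers a rerun; the model's output then
# streams into the placeholder above as generation proceeds.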