import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
# Load model and tokenizer once and cache them so Streamlit reruns don't reload the weights
model_name = 'gpt2-large'

@st.cache_resource
def load_model_and_tokenizer(name):
    model = AutoModelForCausalLM.from_pretrained(name)
    tokenizer = AutoTokenizer.from_pretrained(name, use_fast=True)
    return model, tokenizer

model, tokenizer = load_model_and_tokenizer(model_name)
# Streamlit UI
st.title("Blog Post Generator")
text = st.text_area("Enter the starting text for your blog post:")
# Configuration for generation
generation_config = GenerationConfig(max_new_tokens=200, do_sample=True, temperature=0.7)
if text:
    try:
        # Encode the prompt
        inputs_encoded = tokenizer(text, return_tensors='pt')
        # Generate a continuation (GPT-2 has no pad token, so reuse the EOS token to avoid a warning)
        with torch.no_grad():
            model_output = model.generate(
                inputs_encoded["input_ids"],
                attention_mask=inputs_encoded["attention_mask"],
                generation_config=generation_config,
                pad_token_id=tokenizer.eos_token_id,
            )[0]
        # Decode the generated tokens back into text
        output = tokenizer.decode(model_output, skip_special_tokens=True)
        # Display the result
        st.write("Generated Blog Post:")
        st.write(output)
    except Exception as e:
        st.error(f"An error occurred: {e}")
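
# To try this app locally (assuming Streamlit, torch, and transformers are installed):
#   streamlit run app.py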