Spaces:
Sleeping
Sleeping
File size: 1,173 Bytes
e2ccc4b 175ea08 e34a9a0 525a6a5 e34a9a0 525a6a5 db7ea0e 175ea08 db7ea0e 175ea08 db7ea0e 175ea08 db7ea0e 175ea08 2de0bdd |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
import os
import subprocess
import sys

# NOTE(review): installing dependencies at runtime is an anti-pattern — these
# belong in a requirements.txt so the platform installs them before startup.
# Kept for compatibility, but run through the *current* interpreter's pip
# (os.system('pip ...') may invoke a different Python's pip) and as an argv
# list rather than a shell string.
subprocess.run(
    [sys.executable, '-m', 'pip', 'install', 'streamlit', 'transformers', 'torch'],
    check=False,  # best-effort: if packages are preinstalled this may no-op or fail harmlessly
)

import streamlit as st
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

# Load the GPT-2 model and tokenizer once at module import.
# 'gpt2-large' (~3 GB) is downloaded on first run and cached by transformers.
model_name = 'gpt2-large'
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
def generate_blog_post(topic):
    """Generate a blog post for *topic* with the module-level GPT-2 model.

    Parameters
    ----------
    topic : str
        Free-text topic used as the generation prompt.

    Returns
    -------
    str
        The generated text (prompt included), or "" on empty input or error.
    """
    # Guard: an empty/whitespace prompt gives the model nothing to condition on.
    if not topic or not topic.strip():
        return ""
    try:
        # Encode the input topic as a batch of token ids (shape: 1 x seq_len).
        inputs = tokenizer.encode(topic, return_tensors='pt')
        # Inference only — no_grad avoids building autograd state (memory/speed).
        with torch.no_grad():
            outputs = model.generate(
                inputs,
                max_length=500,
                num_return_sequences=1,
                no_repeat_ngram_size=2,
                do_sample=True,
                top_k=50,
                top_p=0.95,
                temperature=0.9,
                # GPT-2 defines no pad token; use EOS so generate() pads cleanly
                # instead of emitting a warning / misbehaving.
                pad_token_id=tokenizer.eos_token_id,
            )
        # Decode the single generated sequence back to text.
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        # UI boundary: surface the failure in the app rather than crashing the rerun.
        st.error(f"Error: {e}")
        return ""
# Streamlit app: one text input + a button that triggers generation.
st.title("Blog Post Generator")
st.write("Enter a topic to generate a blog post.")
topic = st.text_input("Topic:")
if st.button("Generate"):
    # Robustness: don't run the (slow) model on an empty prompt.
    if not topic.strip():
        st.warning("Please enter a topic first.")
    else:
        with st.spinner('Generating...'):
            blog_post = generate_blog_post(topic)
        st.write(blog_post)
|