# app.py -- Streamlit app that generates a blog post from a topic using GPT-2

import os
# Install dependencies at runtime (Spaces-style shortcut; a requirements.txt is the usual alternative)
os.system('pip install streamlit transformers torch')

import streamlit as st
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load the GPT-2 model and tokenizer
model_name = 'gpt2-large'
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)


def generate_blog_post(topic):
    # Encode the input topic
    inputs = tokenizer.encode(topic, return_tensors='pt')
    # Generate the blog post, sampling with top-k/top-p to keep the text varied
    outputs = model.generate(
        inputs,
        max_length=500,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.9,
    )
    # Decode the generated text
    blog_post = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return blog_post


# Streamlit app
st.title("Blog Post Generator")
st.write("Enter a topic to generate a blog post.")

topic = st.text_input("Topic:")
if st.button("Generate"):
    blog_post = generate_blog_post(topic)
    st.write(blog_post)