# Hugging Face Space (status banner from the scraped page: "Spaces: Sleeping")
# GPT-2 blog post generator app.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load GPT-2 Large once at module import so every request reuses the same
# model/tokenizer instead of reloading them per call.
model_name = 'openai-community/gpt2-large'
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
def generate_blogpost(topic, max_length=500, temperature=0.7):
    """Generate a blog post about *topic* with GPT-2.

    Args:
        topic: Subject of the blog post, inserted into the prompt.
        max_length: Maximum number of NEW tokens to sample (passed to
            ``max_new_tokens``, so the prompt does not count against it).
        temperature: Sampling temperature; lower is more deterministic.

    Returns:
        The generated text with the prompt prefix stripped.
    """
    prompt = f"Write a blog post about {topic}:\n\n"
    # Encode the prompt; keep the full encoding so the attention mask is
    # forwarded to generate() (avoids padding/attention warnings and
    # ambiguity in transformers).
    inputs_encoded = tokenizer(prompt, return_tensors='pt')
    # Sample a continuation. GPT-2 has no pad token, so use EOS as the pad
    # id to silence the "Setting pad_token_id" warning.
    model_output = model.generate(
        **inputs_encoded,
        max_new_tokens=max_length,
        do_sample=True,
        temperature=temperature,
        pad_token_id=tokenizer.eos_token_id,
    )[0]
    # Decode the full sequence (prompt + continuation).
    output = tokenizer.decode(model_output, skip_special_tokens=True)
    # Drop the echoed prompt so only the generated post remains.
    blog_post = output[len(prompt):].strip()
    return blog_post
# Build the Gradio UI: one topic textbox plus sliders that map onto the
# max_length / temperature parameters of generate_blogpost.
iface = gr.Interface(
    fn=generate_blogpost,
    inputs=[
        gr.Textbox(lines=1, placeholder="Enter the blog post topic here..."),
        gr.Slider(minimum=100, maximum=1000, step=50, label="Max Length", value=500),
        gr.Slider(minimum=0.1, maximum=1.0, step=0.1, label="Temperature", value=0.7),
    ],
    outputs="text",
    title="GPT2 Blog Post Generator",
    description="Enter a topic, and this app will generate a blog post using GPT-2.",
)

# Launch the app (blocks and serves the web UI).
iface.launch()