# Hugging Face Space: AI-powered Debate Assistant (FLAN-T5-XL)
# (Original page header — Spaces runtime status, commit hashes, and column
# ruler — was extraction residue and has been converted to this comment.)
from huggingface_hub import InferenceClient
import gradio as gr
import random
# Shared inference client for the hosted FLAN-T5-XL text-generation model.
# NOTE(review): relies on ambient HF credentials/quota; no token is passed here.
client = InferenceClient("google/flan-t5-xl")
def generate_text(prompt, max_length=500):
    """Generate text for *prompt* via the shared inference client.

    max_length caps the number of newly generated tokens; sampling
    temperature is fixed at 0.7.
    """
    generated = client.text_generation(
        prompt,
        max_new_tokens=max_length,
        temperature=0.7,
    )
    return generated
def debate_assistant(topic, stance):
    """Produce a full debate package for *topic*: argument, counterargument, analysis.

    topic  -- the debate question to argue about
    stance -- the side taken by the main argument (e.g. "For" or "Against")
    Returns a single formatted string containing all three generated sections.
    """
    # Section 1: the main argument supporting the chosen stance.
    pro_prompt = f"""Generate a comprehensive argument for the following debate topic:
Topic: {topic}
Stance: {stance}
Include:
1. Main claim
2. Three supporting points
3. Potential counterargument
4. Rebuttal
5. Conclusion"""
    pro_text = generate_text(pro_prompt, max_length=600)

    # Section 2: the opposing view against the original stance.
    con_prompt = f"""Generate a counterargument for the following debate topic:
Topic: {topic}
Original Stance: {stance}
Include:
1. Counter-claim
2. Three rebuttals
3. New supporting point
4. Conclusion"""
    con_text = generate_text(con_prompt, max_length=600)

    # Section 3: a neutral meta-analysis of the topic itself.
    meta_prompt = f"""Analyze the debate on the following topic:
Topic: {topic}
Provide:
1. Topic categorization
2. Two ethical considerations
3. Three discussion questions"""
    meta_text = generate_text(meta_prompt, max_length=400)

    # Assemble the three sections into the single output string.
    sections = (
        f"Argument ({stance}):",
        pro_text,
        "Counterargument:",
        con_text,
        "Analysis:",
        meta_text,
    )
    return "\n".join(sections)
def suggest_topic():
    """Return one debate topic chosen uniformly at random from a fixed pool."""
    topic_pool = (
        "Should artificial intelligence be regulated?",
        "Is universal basic income a viable economic policy?",
        "Should voting be mandatory?",
        "Is space exploration a worthwhile investment?",
        "Should gene editing in humans be allowed?",
        "Is nuclear energy the solution to climate change?",
        "Should social media platforms be held responsible for user content?",
        "Is a four-day work week beneficial for society?",
        "Should animal testing be banned?",
        "Is globalization overall positive or negative for developing countries?",
    )
    return random.choice(topic_pool)
# Build and launch the Gradio UI: topic + stance in, generated debate text out.
# Fix: removed a stray trailing "|" after iface.launch() that made the file
# a syntax error (extraction artifact).
# NOTE(review): suggest_topic() is defined above but never wired to any UI
# control, even though the textbox placeholder mentions clicking
# 'Suggest Topic' — confirm whether a button calling it should be added.
iface = gr.Interface(
    fn=debate_assistant,
    inputs=[
        gr.Textbox(label="Debate Topic", placeholder="Enter a topic or click 'Suggest Topic'"),
        gr.Radio(["For", "Against"], label="Stance"),
    ],
    outputs=gr.Textbox(label="Generated Debate Content"),
    title="AI-powered Debate Assistant (FLAN-T5-XL)",
    description="Enter a debate topic and choose a stance to generate arguments, counterarguments, and analysis.",
    examples=[
        ["Should artificial intelligence be regulated?", "For"],
        ["Is universal basic income a viable economic policy?", "Against"],
        ["Should gene editing in humans be allowed?", "For"],
    ],
)

iface.launch()