File size: 1,663 Bytes
4d10a3f
9aa7803
 
4d10a3f
 
9aa7803
4d10a3f
 
 
9aa7803
 
4d10a3f
 
 
2970eac
 
 
9aa7803
 
4d10a3f
 
 
2970eac
 
 
9aa7803
 
 
 
2970eac
9aa7803
 
 
 
 
 
 
 
 
4d10a3f
 
9aa7803
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
from huggingface_hub import InferenceClient
import gradio as gr

# Set up the client for Mistral model inference
# NOTE(review): this targets the HF serverless Inference API; presumably an HF token
# is picked up from the environment if the model endpoint requires auth — confirm.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

def generate_text(prompt):
    """Send *prompt* to the Mistral inference client and return the generated text.

    Args:
        prompt: The full prompt string to complete.

    Returns:
        The model's completion as returned by ``client.text_generation``.
    """
    # 200 new tokens with mild sampling keeps responses concise but varied.
    return client.text_generation(
        prompt,
        max_new_tokens=200,
        temperature=0.7,
    )

def generate_argument(topic, stance):
    """Generate a supporting argument for *stance* on the given debate *topic*.

    Args:
        topic: The debate topic text.
        stance: The side being argued (e.g. "For" or "Against").

    Returns:
        The model-generated argument text.
    """
    # Assemble the instruction prompt; wording must stay stable so the model
    # receives a consistent task description.
    prompt = (
        "Generate a compelling argument for the following debate topic.\n"
        f"Topic: {topic}\n"
        f"Stance: {stance}\n"
        "\n"
        "Your response should be a well-structured argument supporting the given stance on the topic."
    )
    return generate_text(prompt)

def generate_counterargument(topic, original_argument):
    """Generate a rebuttal to *original_argument* on the given debate *topic*.

    Args:
        topic: The debate topic text.
        original_argument: The argument the counterargument should address.

    Returns:
        The model-generated counterargument text.
    """
    # Assemble the instruction prompt; wording must stay stable so the model
    # receives a consistent task description.
    prompt = (
        "Generate a strong counterargument for the following debate topic and argument.\n"
        f"Topic: {topic}\n"
        f"Original Argument: {original_argument}\n"
        "\n"
        "Your response should be a well-structured counterargument addressing the points made in the original argument."
    )
    return generate_text(prompt)

def debate_assistant(topic, stance):
    """Produce an argument for *stance* on *topic* plus a rebuttal to it.

    Args:
        topic: The debate topic text.
        stance: The side to argue first (e.g. "For" or "Against").

    Returns:
        A single display string containing the argument followed by its
        counterargument, separated by a blank line.
    """
    side_argument = generate_argument(topic, stance)
    rebuttal = generate_counterargument(topic, side_argument)
    sections = [
        f"Argument:\n{side_argument}",
        f"Counterargument:\n{rebuttal}",
    ]
    return "\n\n".join(sections)

# Build the Gradio UI: each component is named first, then wired into the
# Interface so the layout reads top-to-bottom.
topic_input = gr.Textbox(label="Debate Topic")
stance_input = gr.Radio(["For", "Against"], label="Stance")
result_output = gr.Textbox(label="Generated Debate Arguments")

iface = gr.Interface(
    fn=debate_assistant,
    inputs=[topic_input, stance_input],
    outputs=result_output,
    title="AI-powered Debate Assistant (Mistral-7B-Instruct-v0.3)",
    description="Enter a debate topic and choose a stance to generate arguments and counterarguments using Mistral-7B-Instruct-v0.3.",
)

# Start the web app (blocking call).
iface.launch()