from huggingface_hub import InferenceClient
import gradio as gr

# Set up the client for Mistral model inference
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")


def generate_text(prompt):
    response = client.text_generation(prompt, max_new_tokens=200, temperature=0.7)
    return response


def generate_argument(topic, stance):
    prompt = f"""Generate a compelling argument for the following debate topic.

Topic: {topic}
Stance: {stance}

Your response should be a well-structured argument supporting the given stance on the topic."""
    return generate_text(prompt)


def generate_counterargument(topic, original_argument):
    prompt = f"""Generate a strong counterargument for the following debate topic and argument.

Topic: {topic}
Original Argument: {original_argument}

Your response should be a well-structured counterargument addressing the points made in the original argument."""
    return generate_text(prompt)


def debate_assistant(topic, stance):
    # Generate an argument for the chosen stance, then a rebuttal to it
    argument = generate_argument(topic, stance)
    counterargument = generate_counterargument(topic, argument)
    return f"Argument:\n{argument}\n\nCounterargument:\n{counterargument}"


# Create the Gradio interface
iface = gr.Interface(
    fn=debate_assistant,
    inputs=[
        gr.Textbox(label="Debate Topic"),
        gr.Radio(["For", "Against"], label="Stance"),
    ],
    outputs=gr.Textbox(label="Generated Debate Arguments"),
    title="AI-powered Debate Assistant (Mistral-7B-Instruct-v0.3)",
    description="Enter a debate topic and choose a stance to generate arguments and counterarguments using Mistral-7B-Instruct-v0.3.",
)

# Launch the interface
iface.launch()
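
# Note: serverless Hub inference usually requires authentication, and a
# 401/403 error from text_generation typically means no valid token was
# found. A minimal sketch, assuming a token is stored in the HF_TOKEN
# environment variable (the variable name is an assumption; running
# `huggingface-cli login` beforehand also works):
#
#     import os
#     client = InferenceClient(
#         "mistralai/Mistral-7B-Instruct-v0.3",
#         token=os.environ["HF_TOKEN"],  # assumed env var holding your token
#     )
#
# To smoke-test the pipeline without launching the UI, call the handler
# directly before iface.launch():
#
#     print(debate_assistant("Remote work should be the default", "For"))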