Sugamdeol committed · Commit 4d10a3f · verified · 1 Parent(s): 23fbaec

Update app.py

Files changed (1)
  1. app.py +20 -18
app.py CHANGED
@@ -1,26 +1,28 @@
+from huggingface_hub import InferenceClient
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM

-# Load the model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3.1-8B")
-model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3.1-8B")
+# Set up the client for Mistral model inference
+client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")

-def generate_text(prompt, max_length=150):
-    inputs = tokenizer(prompt, return_tensors="pt")
-    outputs = model.generate(**inputs, max_length=max_length, num_return_sequences=1, do_sample=True)
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+def generate_text(prompt):
+    response = client.text_generation(prompt, max_new_tokens=200, temperature=0.7)
+    return response

 def generate_argument(topic, stance):
-    prompt = f"Generate a compelling argument for the following debate topic.\nTopic: {topic}\nStance: {stance}\nArgument:"
-    response = generate_text(prompt, max_length=200)
-    argument = response.split("Argument:")[1].strip()
-    return argument
+    prompt = f"""Generate a compelling argument for the following debate topic.
+Topic: {topic}
+Stance: {stance}
+Argument:"""
+    response = generate_text(prompt)
+    return response.split("Argument:")[1].strip()

 def generate_counterargument(topic, original_argument):
-    prompt = f"Generate a strong counterargument for the following debate topic and argument.\nTopic: {topic}\nOriginal Argument: {original_argument}\nCounterargument:"
-    response = generate_text(prompt, max_length=200)
-    counterargument = response.split("Counterargument:")[1].strip()
-    return counterargument
+    prompt = f"""Generate a strong counterargument for the following debate topic and argument.
+Topic: {topic}
+Original Argument: {original_argument}
+Counterargument:"""
+    response = generate_text(prompt)
+    return response.split("Counterargument:")[1].strip()

 def debate_assistant(topic, stance):
     argument = generate_argument(topic, stance)
@@ -35,8 +37,8 @@ iface = gr.Interface(
         gr.Radio(["For", "Against"], label="Stance")
     ],
     outputs=gr.Textbox(label="Generated Debate Arguments"),
-    title="AI-powered Debate Assistant (Meta-Llama 3.1)",
-    description="Enter a debate topic and choose a stance to generate arguments and counterarguments using Meta-Llama 3.1."
+    title="AI-powered Debate Assistant (Mistral-7B-Instruct-v0.3)",
+    description="Enter a debate topic and choose a stance to generate arguments and counterarguments using Mistral-7B-Instruct-v0.3."
 )

 # Launch the interface
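
For reference, a minimal sketch of exercising the new remote inference path outside Gradio. The HF_TOKEN environment variable and the ask helper are illustrative assumptions, not part of this commit:

import os
from huggingface_hub import InferenceClient

# Same model this commit switches to; the serverless Inference API
# usually requires a token (assumed here to live in HF_TOKEN).
client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.3",
    token=os.environ.get("HF_TOKEN"),
)

def ask(prompt):
    # Mirrors the new generate_text(): remote generation, no local weights or GPU.
    return client.text_generation(prompt, max_new_tokens=200, temperature=0.7)

print(ask("Topic: school uniforms\nStance: For\nArgument:"))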
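
One caveat when testing: by default text_generation() returns only the completion, not the prompt, so the response.split("Argument:")[1] parse in app.py succeeds only when the model happens to repeat the "Argument:" label in its output. A hedged sketch of a more defensive parse (the extract_after_label name is hypothetical):

def extract_after_label(response, label):
    # If the model echoed the label, keep only what follows it;
    # otherwise treat the whole completion as the answer.
    if label in response:
        return response.split(label, 1)[1].strip()
    return response.strip()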