GSridhar1982 committed on
Commit 64be64c
1 Parent(s): 1062cdf

Updated the model

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -5,7 +5,7 @@ import subprocess
 def predict(text):
     # Call llama.cpp with the input text
     result = subprocess.run(
-        ["./llama.cpp/main", "-m", "path/to/your/model", "-p", text],
+        ["./llama.cpp/main", "-m", "GSridhar1982/QA_Llama31_Quantized_GGUF", "-p", text],
         capture_output=True,
         text=True
     )
@@ -14,8 +14,8 @@ def predict(text):
 # Create a Gradio interface
 iface = gr.Interface(
     fn=predict,
-    inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
-    outputs="text",
+    inputs=gr.Textbox(lines=2, placeholder="Enter question here..."),
+    outputs="Answer",
     title="LLaMA Model Inference",
     description="Enter text to generate using the LLaMA model."
 )
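
For context, below is a minimal runnable sketch of what the full app.py might look like after this commit. It is not the committed file itself: the gradio and huggingface_hub imports, the GGUF filename passed to hf_hub_download, and the `return result.stdout` line are assumptions (the return statement falls outside the hunks shown above). The sketch also uses a labeled gr.Textbox for the output, since "Answer" is not a component name that gr.Interface accepts, and downloads the model to a local path because llama.cpp's -m flag expects a file on disk rather than a Hub repo ID.

import subprocess

import gradio as gr
from huggingface_hub import hf_hub_download

# llama.cpp's -m flag needs a local model file, so fetch the GGUF weights from
# the Hub first. The filename here is a placeholder; check the repo for the real one.
MODEL_PATH = hf_hub_download(
    repo_id="GSridhar1982/QA_Llama31_Quantized_GGUF",
    filename="model.gguf",  # hypothetical filename, not taken from the commit
)


def predict(text):
    # Call llama.cpp with the input text
    result = subprocess.run(
        ["./llama.cpp/main", "-m", MODEL_PATH, "-p", text],
        capture_output=True,
        text=True,
    )
    # Return whatever llama.cpp printed to stdout (assumed; not shown in the diff)
    return result.stdout


# Create a Gradio interface
iface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(lines=2, placeholder="Enter question here..."),
    outputs=gr.Textbox(label="Answer"),  # labeled output box instead of outputs="Answer"
    title="LLaMA Model Inference",
    description="Enter text to generate using the LLaMA model.",
)

if __name__ == "__main__":
    iface.launch()

Shelling out to the llama.cpp binary keeps the Space simple, but each request pays the cost of reloading the model; a persistent binding such as llama-cpp-python would avoid that if latency matters.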