Spaces:
Running
Running
Allen Park
committed on
Commit
·
9d86cbe
1
Parent(s):
e303329
add 3 inputs for function and rename from greet to model_call
Browse files
app.py
CHANGED
@@ -24,7 +24,7 @@ Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE":
|
|
24 |
"""
|
25 |
|
26 |
|
27 |
-
def
|
28 |
NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
|
29 |
tokenizer = AutoTokenizer.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct")
|
30 |
model = AutoModelForCausalLM.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct", cache_dir='/tmp/cache', torch_dtype=torch.float16, low_cpu_mem_usage=True)
|
@@ -34,5 +34,11 @@ def greet(question, document, answer):
|
|
34 |
print(generated_text)
|
35 |
return generated_text
|
36 |
|
37 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
38 |
demo.launch()
|
|
|
24 |
"""
|
25 |
|
26 |
|
27 |
+
def model_call(question, document, answer):
|
28 |
NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
|
29 |
tokenizer = AutoTokenizer.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct")
|
30 |
model = AutoModelForCausalLM.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct", cache_dir='/tmp/cache', torch_dtype=torch.float16, low_cpu_mem_usage=True)
|
|
|
34 |
print(generated_text)
|
35 |
return generated_text
|
36 |
|
37 |
+
inputs = [
|
38 |
+
gr.Textbox(label="Question"),
|
39 |
+
gr.Textbox(label="Document"),
|
40 |
+
gr.Textbox(label="Answer")
|
41 |
+
]
|
42 |
+
|
43 |
+
demo = gr.Interface(fn=model_call, inputs=inputs, outputs="text")
|
44 |
demo.launch()
|