import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import spaces

HF_TOKEN = os.environ.get("HF_TOKEN", None)
device = "cuda"  # for GPU usage or "cpu" for CPU usage

# token=HF_TOKEN covers gated checkpoints; it is ignored when HF_TOKEN is unset.
tokenizer = AutoTokenizer.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct", token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct", torch_dtype=torch.float16, device_map="auto", token=HF_TOKEN)

PROMPT = """
Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.

--
QUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION):
{question}

--
DOCUMENT:
{document}

--
ANSWER:
{answer}

--

Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE":
{{"REASONING": <your reasoning as bullet points>, "SCORE": <your final score>}}
"""


@spaces.GPU()
def model_call(question, document, answer):
    filled_prompt = PROMPT.format(question=question, document=document, answer=answer)
    inputs = tokenizer(filled_prompt, return_tensors="pt").to(device)
    # Capture the generated ids so the completion (not the prompt) is decoded.
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=512,  # room for the bullet-point reasoning plus the PASS/FAIL score
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, skipping the echoed prompt.
    generated_text = tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
    print(generated_text)
    return generated_text
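
# The prompt instructs Lynx to answer as JSON with "REASONING" and "SCORE" keys.
# A minimal sketch of a parser for that shape (not part of the original app; the
# slicing is deliberately lenient because the model may wrap the JSON in extra text):
def parse_verdict(raw_output):
    import json  # local import to keep this optional helper self-contained
    start, end = raw_output.find("{"), raw_output.rfind("}")
    if start == -1 or end == -1:
        return None  # no JSON object found in the completion
    try:
        return json.loads(raw_output[start:end + 1])
    except json.JSONDecodeError:
        return None  # malformed JSON; caller can fall back to the raw text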

inputs = [
    gr.Textbox(label="Question"),
    gr.Textbox(label="Document"),
    gr.Textbox(label="Answer")
]

demo = gr.Interface(fn=model_call, inputs=inputs, outputs="text")
demo.launch()