import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import spaces

HF_TOKEN = os.environ.get("HF_TOKEN", None)

if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"

tokenizer = AutoTokenizer.from_pretrained("PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct")
# device_map="auto" already places the weights via accelerate, so the extra
# .to(device) call is dropped: moving an accelerate-dispatched model can fail
# on sharded models and is redundant at best.
model = AutoModelForCausalLM.from_pretrained(
    "PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct",
    torch_dtype=torch.float16,
    device_map="auto",
)
# Gradient checkpointing is a training-time memory optimization; it is left
# disabled here because this app only runs inference (enabling it also turns
# off the KV cache during generate, slowing it down).
# model.gradient_checkpointing_enable()

# def load_model_and_tokenizer(model_choice):
#     if model_choice == "Patronus Lynx 8B":
#         model_name = "PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct"
#     else:
#         model_name = "PatronusAI/Llama-3-Patronus-Lynx-70B-Instruct"
#     tokenizer = AutoTokenizer.from_pretrained(model_name)
#     model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
#     return tokenizer, model

# Lynx faithfulness-evaluation prompt: the model must judge whether ANSWER is
# supported by DOCUMENT and return a JSON verdict.
PROMPT = """
Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.

--
QUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION):
{question}

--
DOCUMENT:
{document}

--
ANSWER:
{answer}

--

Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE":
{{"REASONING": <your reasoning as bullet points>, "SCORE": <your final score>}}
"""

HEADER = """
# Patronus Lynx Demo

**Patronus Lynx** is a state-of-the-art open-source model for hallucination detection.

**Getting Started**: Provide a question and the document or context that was given to your model, along with the answer the model produced, then click submit. The output panel reports a score of Pass if the response is faithful to the given document or context, or Fail if it is a hallucination, together with the reasoning behind the score.
"""

@spaces.GPU()
# def model_call(question, document, answer, tokenizer, model):
def model_call(question, document, answer):
    device = next(model.parameters()).device
    NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
    print("ENTIRE NEW_FORMAT", NEW_FORMAT)
    inputs = tokenizer(NEW_FORMAT, return_tensors="pt").to(device)
    print("INPUTS", inputs)
    input_ids = inputs.input_ids
    attention_mask = inputs.attention_mask
    generate_kwargs = dict(
        input_ids=input_ids,
        attention_mask=attention_mask,
        do_sample=True,
        # Assumed cap (not in the original code): without an explicit limit,
        # generation falls back to the model's default length and the JSON
        # verdict can be cut off.
        max_new_tokens=600,
        pad_token_id=tokenizer.eos_token_id,
    )
    print("GENERATE_KWARGS", generate_kwargs)
    with torch.no_grad():
        outputs = model.generate(**generate_kwargs)
    print("OUTPUTS", outputs)
    generated_text = tokenizer.decode(outputs[0])
    print(generated_text)
    return generated_text

# def update_model(model_choice, tokenizer_state, model_state):
#     new_tokenizer, new_model = load_model_and_tokenizer(model_choice)
#     print("UPDATED MODEL", new_tokenizer, new_model)
#     return new_tokenizer, new_model

inputs = [
    gr.Textbox(label="Question"),
    gr.Textbox(label="Document"),
    gr.Textbox(label="Answer"),
]
# output = gr.Textbox(label="Output")
# submit_button = gr.Button("Submit")

with gr.Blocks() as demo:
    gr.Markdown(HEADER)
    gr.Interface(fn=model_call, inputs=inputs, outputs="text")
    # tokenizer_state = gr.State()
    # model_state = gr.State()
    # model_dropdown = gr.Dropdown(choices=["Patronus Lynx 8B", "Patronus Lynx 70B"], value="Patronus Lynx 8B", label="Model")
    # model_dropdown.change(fn=update_model, inputs=[model_dropdown, tokenizer_state, model_state], outputs=[tokenizer_state, model_state])
    # submit_button.click(fn=model_call, inputs=inputs, outputs=output)
    # initial_tokenizer, initial_model = load_model_and_tokenizer("Patronus Lynx 8B")
    # demo.load(fn=lambda: (initial_tokenizer, initial_model), outputs=[tokenizer_state, model_state])

demo.launch()
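
# The prompt instructs Lynx to answer with a JSON object holding "REASONING"
# and "SCORE" keys, but this app returns the raw decoded text. A minimal
# sketch of extracting the verdict, assuming the model complies with the
# format (parse_verdict is a hypothetical helper, not part of the original
# app):
#
# import json
# import re
#
# def parse_verdict(generated_text):
#     """Scan every {...} span and return (reasoning, score) from the last one
#     that parses as JSON, or (None, None) if none do. The last span is used
#     because the decoded text echoes the prompt template before the verdict."""
#     for match in reversed(list(re.finditer(r"\{[^{}]*\}", generated_text))):
#         try:
#             verdict = json.loads(match.group(0))
#         except json.JSONDecodeError:
#             continue
#         return verdict.get("REASONING"), verdict.get("SCORE")
#     return None, None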