File size: 3,387 Bytes
89cbc4d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
#####################################################
### DOCUMENT PROCESSOR [PROMPTS]
#####################################################
# Jonathan Wang

# ABOUT: 
# This project creates an app to chat with PDFs.

# These are the prompts sent to the LLM.
#####################################################
## TODOS:
# Use the row names instead of .at index locators
# This is kinda dumb because we read the same .csv file over again
    # Should we structure this abstraction differently?

#####################################################
## IMPORTS:
import pandas as pd
from llama_index.core import PromptTemplate

#####################################################
## CODE:

# Adapted from llama_index's default text-QA prompt:
# https://github.com/run-llama/llama_index/blob/main/llama-index-core/llama_index/core/prompts/default_prompts.py
# NOTE: the original had a stray "\n" escape before the real newline after the
# first line, which produced an unintended blank line in the rendered prompt.
QA_PROMPT = """Context information is below.
---------------------
{context_str}
---------------------
Given the context information, answer the query.
You must adhere to the following rules:
- Use the context information, not prior knowledge.
- End the answer with any brief quote(s) from the context that are the most essential in answering the question. 
    - If the context is not helpful in answering the question, do not include a quote.

Query: {query_str}
Answer: """

# Refine prompt: given an existing answer plus newly-retrieved context, ask the
# LLM to improve (or keep) the answer. Template variables: {query_str},
# {existing_answer}, {context_msg}.
# Adapted from llama_index's default refine prompt:
# https://github.com/run-llama/llama_index/blob/main/llama-index-core/llama_index/core/prompts/default_prompts.py
REFINE_PROMPT = """The original query is as follows: {query_str}
We have provided an existing answer: {existing_answer}
We have the opportunity to refine the existing answer (only if needed) with some more context below.
---------------------
{context_msg}
---------------------
Given the new context, refine the original answer to better answer the query.
You must adhere to the following rules:
- If the context isn't useful, return the original answer.
- End the answer with any brief quote(s) from the original answer or new context that are the most essential in answering the question. 
    - If the new context is not helpful in answering the question, leave the original answer unchanged.

Refined Answer: """

def get_qa_prompt() -> PromptTemplate:
    """Build the Question-Answering prompt template.

    Returns:
        PromptTemplate: template wrapping ``QA_PROMPT``; expects the
        ``context_str`` and ``query_str`` variables at format time.
    """
    # Based on llama_index's default text-QA prompt:
    # https://github.com/run-llama/llama_index/blob/main/llama-index-core/llama_index/core/prompts/default_prompts.py
    return PromptTemplate(QA_PROMPT)


def get_refine_prompt() -> PromptTemplate:
    """Build the answer-refinement prompt template.

    Returns:
        PromptTemplate: template wrapping ``REFINE_PROMPT``; expects the
        ``query_str``, ``existing_answer``, and ``context_msg`` variables
        at format time.
    """
    # Based on llama_index's default refine prompt:
    # https://github.com/run-llama/llama_index/blob/main/llama-index-core/llama_index/core/prompts/default_prompts.py
    return PromptTemplate(REFINE_PROMPT)


# def get_reqdoc_prompt(
#     prompt_file_path: str
# ) -> PromptTemplate:
#     """Given a path to the prompts, get prompt to identify requested info from document."""
#     prompts = pd.read_csv(prompt_file_path)
#     # https://github.com/run-llama/llama_index/blob/main/llama-index-core/llama_index/core/prompts/default_prompts.py
#     reqdoc_prompt = PromptTemplate(
#         prompts.at[2, 'Prompt']
#     )
#     return (reqdoc_prompt)