# Prompt templates and formatting helpers for the chat pipeline.
from transformers import AutoTokenizer
from langchain_core.prompts import PromptTemplate
from typing import List

import models

# Tokenizer is used only to render prompt text through the model's chat
# template (no tokenization happens in this module).
# NOTE(review): this is a gated Hugging Face repo — from_pretrained will
# download tokenizer files and may require HF authentication; confirm the
# deployment environment handles that.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
# Bare pass-through template: the user's question with no extra framing.
raw_prompt = "{question}"

# Answer a follow-up question given the prior conversation verbatim.
history_prompt = """
Given the following conversation provide a helpful answer to the follow up question.
Chat History:
{chat_history}
Follow Up question: {question}
helpful answer:
"""

# Condense chat history + follow-up into a standalone question
# (classic conversational-retrieval rewriting step).
question_prompt = """
Given the following conversation and a follow up question, rephrase the
follow up question to be a standalone question, in its original language.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:
"""

# Answer strictly from the retrieved context.
context_prompt = """
Answer the question based only on the following context:
{context}
Question: {standalone_question}
"""

# Ask the model to select relevant files from a path listing.
map_prompt = """
Given the following list of file paths, return a comma separated list of the most likely files to have content that could potentially help answer the question. Return nothing if none of those would help.
Make sure to return the complete full paths as it is written in the original list
File list:
{file_list}
Question: {question}
Return a comma separated list of files and nothing else!
Comma separated list:
"""
def format_prompt(prompt):
    """Wrap *prompt* in the Llama-3 chat template and return a PromptTemplate.

    The text is placed as a single user turn after a generic system message,
    rendered through the tokenizer's chat template untokenized and with the
    generation prompt appended, then wrapped for LangChain.

    Args:
        prompt: Raw template string; may contain `{placeholders}` that
            PromptTemplate will later fill in.

    Returns:
        A ``PromptTemplate`` built from the chat-formatted string.
    """
    chat = [
        {"role": "system", "content": "You are a helpful AI assistant."},
        {"role": "user", "content": prompt},
    ]
    formatted_prompt = tokenizer.apply_chat_template(
        chat,
        tokenize=False,
        add_generation_prompt=True,
    )
    return PromptTemplate.from_template(formatted_prompt)
def format_chat_history(messages: List[models.Message]):
    """Render messages as newline-separated ``<type>: <text>`` lines.

    Args:
        messages: Message objects exposing ``type`` and ``message`` attributes.

    Returns:
        A single string, one message per line.
    """
    return '\n'.join(
        '{}: {}'.format(message.type, message.message)
        for message in messages
    )
def format_context(docs: List[str]) -> str:
    """Join retrieved document strings with a blank line between each."""
    return '\n\n'.join(docs)
# Pre-render every template through the chat template once at import time so
# callers can use the PromptTemplate objects directly.
raw_prompt_formatted = format_prompt(raw_prompt)
history_prompt_formatted = format_prompt(history_prompt)
question_prompt_formatted = format_prompt(question_prompt)
context_prompt_formatted = format_prompt(context_prompt)
map_prompt_formatted = format_prompt(map_prompt)