# NOTE(review): web-scrape residue (page chrome, commit hashes, line-number
# gutter) removed from the top of this file so it parses as Python.
from langchain_community.document_loaders import PyPDFLoader
import os
from langchain_openai import ChatOpenAI
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEndpoint
from setup.environment import default_model
# NOTE(review): both lookups discard their return value, so they neither
# validate nor set anything — the LangChain clients below read these env vars
# themselves. If the intent was to fail fast on missing keys, use
# os.environ["..."] (raises KeyError) instead — TODO confirm intent.
os.environ.get("OPENAI_API_KEY")
os.environ.get("HUGGINGFACEHUB_API_TOKEN")
def getPDF(file_path="./nike.pdf"):
    """Load a PDF from *file_path* and split it into overlapping text chunks.

    Chunks are 1000 characters with a 200-character overlap; image extraction
    is disabled. Returns the list of split Document objects.
    """
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    pdf_loader = PyPDFLoader(file_path, extract_images=False)
    return pdf_loader.load_and_split(splitter)
def create_retriever(documents):
    """Index *documents* in an in-memory Chroma store and return a retriever.

    The retriever performs plain similarity search and returns only the single
    best-matching chunk (k=1), embedding queries with OpenAIEmbeddings.
    """
    store = Chroma.from_documents(documents, embedding=OpenAIEmbeddings())
    return store.as_retriever(search_type="similarity", search_kwargs={"k": 1})
def create_prompt_llm_chain(system_prompt, modelParam):
    """Build a stuff-documents question-answering chain.

    Selects ChatOpenAI when ``modelParam`` equals the project's default model,
    otherwise a HuggingFace inference endpoint for ``modelParam``. The system
    prompt is extended with a ``{context}`` placeholder (filled by the chain
    with the retrieved documents); the user question fills ``{input}``.

    Args:
        system_prompt: Base system instructions for the model.
        modelParam: Model identifier (OpenAI model name or HF repo id).

    Returns:
        A runnable chain produced by ``create_stuff_documents_chain``.
    """
    if modelParam == default_model:
        model = ChatOpenAI(model=modelParam)
    else:
        model = HuggingFaceEndpoint(
            repo_id=modelParam,
            task="text-generation",
            # Deterministic, short completions for QA answers.
            max_new_tokens=100,
            do_sample=False,
            huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
        )
    # create_stuff_documents_chain requires a {context} slot for the
    # retrieved documents.
    system_prompt = system_prompt + "\n\n" + "{context}"
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            ("human", "{input}"),
        ]
    )
    return create_stuff_documents_chain(model, prompt)