from langchain_community.chat_models import ChatOllama
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_pinecone import PineconeVectorStore


def make_chain_ollama(retriever):
    """Build a RAG chain backed by a local Ollama model.

    Wires the given retriever into a LangChain Expression Language
    pipeline: retrieved documents are joined into a single context
    string, interpolated into a Korean-answering prompt template,
    sent to the ``zephyr:latest`` Ollama chat model, and the model
    output is parsed to a plain string.

    Args:
        retriever: A LangChain retriever; invoked with the user's
            question to fetch context documents.

    Returns:
        A runnable chain that accepts a question string and returns
        the model's answer as a string.
    """

    def _join_documents(documents):
        # Concatenate page contents with blank-line separators so the
        # prompt receives one flat context block.
        return "\n\n".join(document.page_content for document in documents)

    # Local chat model served by Ollama.
    llm = ChatOllama(model="zephyr:latest")

    # Prompt instructs the model (in English and Korean) to answer only
    # from the supplied context, in Korean. NOTE(review): the Korean text
    # below appears mojibake-encoded in this file — preserved verbatim.
    template = (
        "\"```\" Below is an instruction that describes a task. Write a response that appropriately completes the request."
        "์ ์ํ๋ context์์๋ง ๋๋ตํ๊ณ context์ ์๋ ๋ด์ฉ์ ๋ชจ๋ฅด๊ฒ ๋ค๊ณ ๋๋ตํด"
        "make answer in korean. ํ๊ตญ์ด๋ก ๋๋ตํ์ธ์"
        "\n\nContext:\n{context}\n;"
        "Question: {question}"
        "\n\nAnswer:"
    )
    prompt = ChatPromptTemplate.from_template(template)

    # LCEL pipeline: the incoming question feeds both the retriever
    # (whose documents become {context}) and {question} directly.
    chain = (
        {"context": retriever | _join_documents, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )

    return chain
|
|
|