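"""Streamlit app for chatting with uploaded documents.

Uploaded PDF, TXT, CSV, and JSON files are loaded into LangChain Documents,
split into overlapping chunks, embedded with a sentence-transformers model,
and indexed in a FAISS vector store. A ConversationalRetrievalChain backed by
a local llama.cpp model (llama-2-7b-chat.Q2_K.gguf) then answers questions
about the uploaded content, keeping the chat history in memory.
"""
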
import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS, Chroma
from langchain.embeddings import HuggingFaceEmbeddings  # General embeddings from HuggingFace models.
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.docstore.document import Document
from htmlTemplates import css, bot_template, user_template
from langchain.llms import HuggingFaceHub, LlamaCpp, CTransformers  # For loading transformer models.
from langchain.document_loaders import PyPDFLoader
from tempfile import NamedTemporaryFile

def get_pdf_text(pdf_docs):
    # Write the uploaded PDF to a temporary file so PyPDFLoader can read it
    # from disk, then return the loaded pages as a list of Documents.
    with NamedTemporaryFile() as temp_file:
        temp_file.write(pdf_docs.getvalue())
        temp_file.seek(0)
        pdf_loader = PyPDFLoader(temp_file.name)
        pdf_doc = pdf_loader.load()
        return pdf_doc

def get_text_chunks(documents):
    # Split the loaded documents into overlapping chunks for embedding.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len
    )
    # text_splitter = CharacterTextSplitter(
    #     separator="\n",
    #     chunk_size=1000,
    #     chunk_overlap=200,
    #     length_function=len
    # )
    documents = text_splitter.split_documents(documents)
    print('documents = ', documents)
    return documents

def get_vectorstore(text_chunks):
    # Load the desired embeddings model.
    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
                                       model_kwargs={'device': 'cpu'})
    print('embeddings = ', embeddings)
    # embeddings = OpenAIEmbeddings()
    # embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl",
    #                                            model_kwargs={'device': 'cpu'})
    vectorstore = FAISS.from_documents(documents=text_chunks, embedding=embeddings)
    # vectorstore = Chroma.from_documents(documents=text_chunks, embedding=embeddings)
    return vectorstore

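# The conversation chain below ties together the pieces built above: the FAISS
# retriever from get_vectorstore(), a ConversationBufferMemory holding the chat
# history, and a local llama.cpp model. LangChain's ConversationalRetrievalChain
# uses that history to reformulate follow-up questions before retrieving chunks
# and generating an answer.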
def get_conversation_chain(vectorstore):
    model_path = 'llama-2-7b-chat.Q2_K.gguf'
    # llm = ChatOpenAI()
    # llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature": 0.5, "max_length": 512})
    # llm = CTransformers(model="llama-2-7b-chat.ggmlv3.q2_K.bin", model_type="llama",
    #                     config={'max_new_tokens': 2048})
    llm = LlamaCpp(model_path=model_path,
                   temperature=0.75,
                   max_tokens=2000,
                   top_p=1,
                   n_ctx=2048,  # context window large enough for the retrieved chunks
                   verbose=True)
    memory = ConversationBufferMemory(
        memory_key='chat_history', return_messages=True)
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory
    )
    return conversation_chain

def handle_userinput(user_question):
    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']

    for i, message in enumerate(st.session_state.chat_history):
        if i % 2 == 0:
            st.write(user_template.replace(
                "{{MSG}}", message.content), unsafe_allow_html=True)
        else:
            st.write(bot_template.replace(
                "{{MSG}}", message.content), unsafe_allow_html=True)

def get_text_file(docs):
    text = docs.read().decode("utf-8")
    return text

def get_csv_file(docs):
    import pandas as pd

    # Flatten each row into a sentence-like line: "<first column> <col> is <value> ...".
    text = ''
    data = pd.read_csv(docs)
    for index, row in data.iterrows():
        item_name = row.iloc[0]
        row_text = str(item_name)
        for col_name in data.columns[1:]:
            row_text += '{} is {} '.format(col_name, row[col_name])
        text += row_text + '\n'
    return text

def get_json_file(docs):
    import json

    # Concatenate each top-level key with every element of its value, one per line.
    text = ''
    json_data = json.load(docs)
    for f_key, f_value in json_data.items():
        for s_value in f_value:
            text += str(f_key) + str(s_value)
            text += '\n'
    return text

def get_hwp_file(docs):
    # TODO: .hwp parsing is not implemented yet; return an empty string so callers don't break.
    return ''

def get_docs_file(docs):
    # TODO: .docx parsing is not implemented yet; return an empty string so callers don't break.
    return ''

def main():
    load_dotenv()
    st.set_page_config(page_title="Chat with multiple PDFs",
                       page_icon=":books:")
    st.write(css, unsafe_allow_html=True)

    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None

    st.header("Chat with multiple PDFs :books:")
    user_question = st.text_input("Ask a question about your documents:")
    if user_question:
        handle_userinput(user_question)

    with st.sidebar:
        st.subheader("Your documents")
        docs = st.file_uploader(
            "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
        if st.button("Process"):
            with st.spinner("Processing"):
                # Collect every uploaded file as a list of Documents. PDF pages arrive
                # as Documents already; the other loaders return plain text, which is
                # wrapped in a Document so everything shares the same chunking path.
                doc_list = []

                for file in docs:
                    print('file - type : ', file.type)
                    if file.type == 'text/plain':
                        # file is .txt
                        doc_list.append(Document(page_content=get_text_file(file)))
                    elif file.type in ['application/octet-stream', 'application/pdf']:
                        # file is .pdf
                        doc_list.extend(get_pdf_text(file))
                    elif file.type == 'text/csv':
                        # file is .csv
                        doc_list.append(Document(page_content=get_csv_file(file)))
                    elif file.type == 'application/json':
                        # file is .json
                        doc_list.append(Document(page_content=get_json_file(file)))
                    elif file.type == 'application/x-hwp':
                        # file is .hwp
                        doc_list.append(Document(page_content=get_hwp_file(file)))
                    elif file.type == 'application/vnd.openxmlformats-officedocument.wordprocessingml.document':
                        # file is .docx
                        doc_list.append(Document(page_content=get_docs_file(file)))

                # get the text chunks
                text_chunks = get_text_chunks(doc_list)

                # create vector store
                vectorstore = get_vectorstore(text_chunks)

                # create conversation chain
                st.session_state.conversation = get_conversation_chain(
                    vectorstore)

if __name__ == '__main__':
    main()
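
# Note: the quantized model file 'llama-2-7b-chat.Q2_K.gguf' referenced in
# get_conversation_chain() must be present in the working directory. Assuming
# this file is saved as app.py, the app can be started locally with:
#   streamlit run app.py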