Upload 7 files
Browse files- .env.example +2 -0
- .gitignore +160 -0
- .python-version +1 -0
- app.py +104 -0
- htmlTemplates.py +44 -0
- readme.md +65 -0
- requirements.txt +14 -0
.env.example
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
OPENAI_API_KEY=
|
2 |
+
HUGGINGFACEHUB_API_TOKEN=
|
.gitignore
ADDED
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Byte-compiled / optimized / DLL files
|
2 |
+
__pycache__/
|
3 |
+
*.py[cod]
|
4 |
+
*$py.class
|
5 |
+
|
6 |
+
# C extensions
|
7 |
+
*.so
|
8 |
+
|
9 |
+
# Distribution / packaging
|
10 |
+
.Python
|
11 |
+
build/
|
12 |
+
develop-eggs/
|
13 |
+
dist/
|
14 |
+
downloads/
|
15 |
+
eggs/
|
16 |
+
.eggs/
|
17 |
+
lib/
|
18 |
+
lib64/
|
19 |
+
parts/
|
20 |
+
sdist/
|
21 |
+
var/
|
22 |
+
wheels/
|
23 |
+
share/python-wheels/
|
24 |
+
*.egg-info/
|
25 |
+
.installed.cfg
|
26 |
+
*.egg
|
27 |
+
MANIFEST
|
28 |
+
|
29 |
+
# PyInstaller
|
30 |
+
# Usually these files are written by a python script from a template
|
31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
32 |
+
*.manifest
|
33 |
+
*.spec
|
34 |
+
|
35 |
+
# Installer logs
|
36 |
+
pip-log.txt
|
37 |
+
pip-delete-this-directory.txt
|
38 |
+
|
39 |
+
# Unit test / coverage reports
|
40 |
+
htmlcov/
|
41 |
+
.tox/
|
42 |
+
.nox/
|
43 |
+
.coverage
|
44 |
+
.coverage.*
|
45 |
+
.cache
|
46 |
+
nosetests.xml
|
47 |
+
coverage.xml
|
48 |
+
*.cover
|
49 |
+
*.py,cover
|
50 |
+
.hypothesis/
|
51 |
+
.pytest_cache/
|
52 |
+
cover/
|
53 |
+
|
54 |
+
# Translations
|
55 |
+
*.mo
|
56 |
+
*.pot
|
57 |
+
|
58 |
+
# Django stuff:
|
59 |
+
*.log
|
60 |
+
local_settings.py
|
61 |
+
db.sqlite3
|
62 |
+
db.sqlite3-journal
|
63 |
+
|
64 |
+
# Flask stuff:
|
65 |
+
instance/
|
66 |
+
.webassets-cache
|
67 |
+
|
68 |
+
# Scrapy stuff:
|
69 |
+
.scrapy
|
70 |
+
|
71 |
+
# Sphinx documentation
|
72 |
+
docs/_build/
|
73 |
+
|
74 |
+
# PyBuilder
|
75 |
+
.pybuilder/
|
76 |
+
target/
|
77 |
+
|
78 |
+
# Jupyter Notebook
|
79 |
+
.ipynb_checkpoints
|
80 |
+
|
81 |
+
# IPython
|
82 |
+
profile_default/
|
83 |
+
ipython_config.py
|
84 |
+
|
85 |
+
# pyenv
|
86 |
+
# For a library or package, you might want to ignore these files since the code is
|
87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
88 |
+
# .python-version
|
89 |
+
|
90 |
+
# pipenv
|
91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
94 |
+
# install all needed dependencies.
|
95 |
+
#Pipfile.lock
|
96 |
+
|
97 |
+
# poetry
|
98 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
100 |
+
# commonly ignored for libraries.
|
101 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
102 |
+
#poetry.lock
|
103 |
+
|
104 |
+
# pdm
|
105 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
106 |
+
#pdm.lock
|
107 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
108 |
+
# in version control.
|
109 |
+
# https://pdm.fming.dev/#use-with-ide
|
110 |
+
.pdm.toml
|
111 |
+
|
112 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
113 |
+
__pypackages__/
|
114 |
+
|
115 |
+
# Celery stuff
|
116 |
+
celerybeat-schedule
|
117 |
+
celerybeat.pid
|
118 |
+
|
119 |
+
# SageMath parsed files
|
120 |
+
*.sage.py
|
121 |
+
|
122 |
+
# Environments
|
123 |
+
.env
|
124 |
+
.venv
|
125 |
+
env/
|
126 |
+
venv/
|
127 |
+
ENV/
|
128 |
+
env.bak/
|
129 |
+
venv.bak/
|
130 |
+
|
131 |
+
# Spyder project settings
|
132 |
+
.spyderproject
|
133 |
+
.spyproject
|
134 |
+
|
135 |
+
# Rope project settings
|
136 |
+
.ropeproject
|
137 |
+
|
138 |
+
# mkdocs documentation
|
139 |
+
/site
|
140 |
+
|
141 |
+
# mypy
|
142 |
+
.mypy_cache/
|
143 |
+
.dmypy.json
|
144 |
+
dmypy.json
|
145 |
+
|
146 |
+
# Pyre type checker
|
147 |
+
.pyre/
|
148 |
+
|
149 |
+
# pytype static type analyzer
|
150 |
+
.pytype/
|
151 |
+
|
152 |
+
# Cython debug symbols
|
153 |
+
cython_debug/
|
154 |
+
|
155 |
+
# PyCharm
|
156 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
157 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
158 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
159 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
160 |
+
.idea
|
.python-version
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
3.9
|
app.py
ADDED
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from dotenv import load_dotenv
|
3 |
+
from PyPDF2 import PdfReader
|
4 |
+
from langchain.text_splitter import CharacterTextSplitter
|
5 |
+
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
|
6 |
+
from langchain.vectorstores import FAISS
|
7 |
+
from langchain.chat_models import ChatOpenAI
|
8 |
+
from langchain.memory import ConversationBufferMemory
|
9 |
+
from langchain.chains import ConversationalRetrievalChain
|
10 |
+
from htmlTemplates import css, bot_template, user_template
|
11 |
+
from langchain.llms import HuggingFaceHub
|
12 |
+
|
13 |
+
def get_pdf_text(pdf_docs):
    """Extract and concatenate the text of every page in the uploaded PDFs.

    Args:
        pdf_docs: iterable of file-like objects accepted by PyPDF2's PdfReader.

    Returns:
        str: all extracted page text joined together (empty string if there
        are no documents or no extractable text).
    """
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() may return None for pages with no extractable
            # text (e.g. scanned images); guard so concatenation never fails.
            text += page.extract_text() or ""
    return text
|
20 |
+
|
21 |
+
|
22 |
+
def get_text_chunks(text):
    """Split raw document text into overlapping chunks for embedding.

    Chunks are ~1000 characters, split on newlines, with a 200-character
    overlap so context is preserved across chunk boundaries.
    """
    splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    return splitter.split_text(text)
|
31 |
+
|
32 |
+
|
33 |
+
def get_vectorstore(text_chunks):
    """Embed the text chunks and index them in an in-memory FAISS store."""
    # Alternative (paid) backend:
    # embeddings = OpenAIEmbeddings()
    instruct_embeddings = HuggingFaceInstructEmbeddings(
        model_name="hkunlp/instructor-xl")
    return FAISS.from_texts(texts=text_chunks, embedding=instruct_embeddings)
|
38 |
+
|
39 |
+
|
40 |
+
def get_conversation_chain(vectorstore):
    """Build a conversational retrieval chain over the given vector store.

    The chain keeps the full exchange in a buffer memory so follow-up
    questions can reference earlier turns.
    """
    # Alternative (paid) backend:
    # llm = ChatOpenAI()
    llm = HuggingFaceHub(
        repo_id="google/flan-t5-xxl",
        model_kwargs={"temperature": 0.5, "max_length": 512},
    )
    chat_memory = ConversationBufferMemory(
        memory_key='chat_history',
        return_messages=True,
    )
    return ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=chat_memory,
    )
|
52 |
+
|
53 |
+
|
54 |
+
def handle_userinput(user_question):
    """Run the question through the conversation chain and render the chat.

    Args:
        user_question: the user's natural-language question (non-empty str).

    Side effects:
        Updates ``st.session_state.chat_history`` and writes the rendered
        chat transcript to the Streamlit page.
    """
    # Guard: asking a question before any PDFs are processed leaves
    # session_state.conversation as None, which previously raised
    # "TypeError: 'NoneType' object is not callable".
    if st.session_state.conversation is None:
        st.warning("Please upload and process your PDFs first.")
        return

    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']

    # chat_history alternates user / bot messages, starting with the user.
    for i, message in enumerate(st.session_state.chat_history):
        template = user_template if i % 2 == 0 else bot_template
        st.write(template.replace("{{MSG}}", message.content),
                 unsafe_allow_html=True)
|
65 |
+
|
66 |
+
|
67 |
+
def main():
    """Streamlit entry point: page setup, sidebar PDF upload, and chat UI."""
    # Load OPENAI_API_KEY / HUGGINGFACEHUB_API_TOKEN from a local .env file.
    load_dotenv()
    st.set_page_config(page_title="Chat with multiple PDFs",
                       page_icon=":books:")
    # Inject the chat-bubble CSS once per page load.
    st.write(css, unsafe_allow_html=True)

    # Persist the chain and history across Streamlit reruns; they start
    # as None until the user clicks "Process".
    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None

    st.header("Chat with multiple PDFs :books:")
    user_question = st.text_input("Ask a question about your documents:")
    if user_question:
        handle_userinput(user_question)

    with st.sidebar:
        st.subheader("Your documents")
        pdf_docs = st.file_uploader(
            "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
        if st.button("Process"):
            with st.spinner("Processing"):
                # get pdf text
                raw_text = get_pdf_text(pdf_docs)

                # get the text chunks
                text_chunks = get_text_chunks(raw_text)

                # create vector store
                vectorstore = get_vectorstore(text_chunks)

                # create conversation chain (stored in session state so it
                # survives Streamlit's rerun-on-interaction model)
                st.session_state.conversation = get_conversation_chain(
                    vectorstore)


if __name__ == '__main__':
    main()
|
htmlTemplates.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Inline CSS injected once per page load (via st.write with
# unsafe_allow_html=True) to style the chat bubbles.
css = '''
<style>
.chat-message {
    padding: 1.5rem; border-radius: 0.5rem; margin-bottom: 1rem; display: flex
}
.chat-message.user {
    background-color: #2b313e
}
.chat-message.bot {
    background-color: #475063
}
.chat-message .avatar {
  width: 20%;
}
.chat-message .avatar img {
  max-width: 78px;
  max-height: 78px;
  border-radius: 50%;
  object-fit: cover;
}
.chat-message .message {
  width: 80%;
  padding: 0 1.5rem;
  color: #fff;
}
'''

# HTML snippet for an assistant message; the literal "{{MSG}}" placeholder
# is substituted with the message text via str.replace at render time.
bot_template = '''
<div class="chat-message bot">
    <div class="avatar">
        <img src="https://i.ibb.co/cN0nmSj/Screenshot-2023-05-28-at-02-37-21.png" style="max-height: 78px; max-width: 78px; border-radius: 50%; object-fit: cover;">
    </div>
    <div class="message">{{MSG}}</div>
</div>
'''

# HTML snippet for a user message; same "{{MSG}}" placeholder convention.
user_template = '''
<div class="chat-message user">
    <div class="avatar">
        <img src="https://i.ibb.co/rdZC7LZ/Photo-logo-1.png">
    </div>
    <div class="message">{{MSG}}</div>
</div>
'''
|
readme.md
ADDED
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# MultiPDF Chat App
|
2 |
+
|
3 |
+
> You can find the tutorial for this project on [YouTube](https://youtu.be/dXxQ0LR-3Hg).
|
4 |
+
|
5 |
+
## Introduction
|
6 |
+
------------
|
7 |
+
The MultiPDF Chat App is a Python application that allows you to chat with multiple PDF documents. You can ask questions about the PDFs using natural language, and the application will provide relevant responses based on the content of the documents. This app utilizes a language model to generate accurate answers to your queries. Please note that the app will only respond to questions related to the loaded PDFs.
|
8 |
+
|
9 |
+
## How It Works
|
10 |
+
------------
|
11 |
+
|
12 |
+
![MultiPDF Chat App Diagram](./docs/PDF-LangChain.jpg)
|
13 |
+
|
14 |
+
The application follows these steps to provide responses to your questions:
|
15 |
+
|
16 |
+
1. PDF Loading: The app reads multiple PDF documents and extracts their text content.
|
17 |
+
|
18 |
+
2. Text Chunking: The extracted text is divided into smaller chunks that can be processed effectively.
|
19 |
+
|
20 |
+
3. Language Model: The application utilizes a language model to generate vector representations (embeddings) of the text chunks.
|
21 |
+
|
22 |
+
4. Similarity Matching: When you ask a question, the app compares it with the text chunks and identifies the most semantically similar ones.
|
23 |
+
|
24 |
+
5. Response Generation: The selected chunks are passed to the language model, which generates a response based on the relevant content of the PDFs.
|
25 |
+
|
26 |
+
## Dependencies and Installation
|
27 |
+
----------------------------
|
28 |
+
To install the MultiPDF Chat App, please follow these steps:
|
29 |
+
|
30 |
+
1. Clone the repository to your local machine.
|
31 |
+
|
32 |
+
2. Install the required dependencies by running the following command:
|
33 |
+
```
|
34 |
+
pip install -r requirements.txt
|
35 |
+
```
|
36 |
+
|
37 |
+
3. Obtain an API key from OpenAI and add it to the `.env` file in the project directory.
|
38 |
+
```commandline
|
39 |
+
OPENAI_API_KEY=your_secret_api_key
|
40 |
+
```
|
41 |
+
|
42 |
+
## Usage
|
43 |
+
-----
|
44 |
+
To use the MultiPDF Chat App, follow these steps:
|
45 |
+
|
46 |
+
1. Ensure that you have installed the required dependencies and added the OpenAI API key to the `.env` file.
|
47 |
+
|
48 |
+
2. Run the `app.py` file using the Streamlit CLI. Execute the following command:
|
49 |
+
```
|
50 |
+
streamlit run app.py
|
51 |
+
```
|
52 |
+
|
53 |
+
3. The application will launch in your default web browser, displaying the user interface.
|
54 |
+
|
55 |
+
4. Load multiple PDF documents into the app by following the provided instructions.
|
56 |
+
|
57 |
+
5. Ask questions in natural language about the loaded PDFs using the chat interface.
|
58 |
+
|
59 |
+
## Contributing
|
60 |
+
------------
|
61 |
+
This repository is intended for educational purposes and does not accept further contributions. It serves as supporting material for a YouTube tutorial that demonstrates how to build this project. Feel free to utilize and enhance the app based on your own requirements.
|
62 |
+
|
63 |
+
## License
|
64 |
+
-------
|
65 |
+
The MultiPDF Chat App is released under the [MIT License](https://opensource.org/licenses/MIT).
|
requirements.txt
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
langchain==0.0.184
|
2 |
+
PyPDF2==3.0.1
|
3 |
+
python-dotenv==1.0.0
|
4 |
+
streamlit==1.18.1
|
5 |
+
openai==0.27.6
|
6 |
+
faiss-cpu==1.7.4
|
7 |
+
altair==4
|
8 |
+
tiktoken==0.4.0
|
9 |
+
# uncomment to use huggingface llms
|
10 |
+
huggingface-hub==0.14.1
|
11 |
+
|
12 |
+
# uncomment to use instructor embeddings
|
13 |
+
InstructorEmbedding==1.0.1
|
14 |
+
sentence-transformers==2.2.2
|