Update app.py
Browse files
app.py
CHANGED
@@ -2,8 +2,11 @@ import streamlit as st
|
|
2 |
from dotenv import load_dotenv
|
3 |
from PyPDF2 import PdfReader
|
4 |
from langchain.text_splitter import CharacterTextSplitter
|
5 |
-
from langchain.embeddings import OpenAIEmbeddings
|
6 |
from langchain.vectorstores import FAISS
|
|
|
|
|
|
|
7 |
|
8 |
def get_html(html):
|
9 |
text = ""
|
@@ -19,17 +22,36 @@ def get_chunk_text(raw_text):
|
|
19 |
return chunks
|
20 |
|
21 |
def get_vector_store(text_chunks):
|
22 |
-
embeddings = OpenAIEmbeddings
|
|
|
23 |
vector_store = FAISS.from_texts(texts=text_chunks,embedding = embeddings)
|
24 |
return vector_store
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
|
|
|
|
|
|
|
26 |
|
27 |
|
28 |
def main():
|
29 |
load_dotenv()
|
30 |
st.set_page_config(page_title="Reads your html",page_icon=":books:")
|
|
|
|
|
|
|
31 |
st.header("Get your best Element")
|
32 |
-
st.text_input("Pass your Element with its information")
|
|
|
|
|
33 |
|
34 |
with st.sidebar:
|
35 |
st.subheader("your html")
|
@@ -46,7 +68,12 @@ def main():
|
|
46 |
|
47 |
|
48 |
#create vector store
|
49 |
-
|
|
|
|
|
|
|
|
|
|
|
50 |
|
51 |
if __name__ == '__main__':
|
52 |
main()
|
|
|
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
# fixed: class is ConversationalRetrievalChain — "ConversationRetrievalChain" does not exist and raises ImportError at module load
from langchain.chains import ConversationalRetrievalChain
# fixed: ChatOpenAI lives in langchain.chat_models, not langchain.llms
from langchain.chat_models import ChatOpenAI
10 |
|
11 |
def get_html(html):
|
12 |
text = ""
|
|
|
22 |
return chunks
|
23 |
|
24 |
def get_vector_store(text_chunks):
    """Embed the given text chunks and index them in a FAISS vector store.

    Embeddings come from the local HuggingFace Instructor-XL model; the
    hosted OpenAI alternative is left commented out as a reference.

    Args:
        text_chunks: iterable of text strings to embed and index.

    Returns:
        A FAISS vector store built from the chunks.
    """
    # embeddings = OpenAIEmbeddings()
    embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
    return FAISS.from_texts(texts=text_chunks, embedding=embeddings)
|
29 |
+
|
30 |
+
def get_conversation_chain(vector_store):
    """Build a conversational retrieval chain over the given vector store.

    The chain pairs an OpenAI chat model with a retriever backed by
    *vector_store* and a buffer memory keyed as "chat_history" so follow-up
    questions keep conversational context.

    Args:
        vector_store: a vector store (e.g. FAISS) exposing ``as_retriever()``.

    Returns:
        A ConversationalRetrievalChain ready to be called with
        ``{"question": ...}``.
    """
    llm = ChatOpenAI()
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    # fixed: the class is ConversationalRetrievalChain; the original referenced
    # the nonexistent "ConversationRetrievalChain", which raises ImportError.
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vector_store.as_retriever(),
        memory=memory,
    )
    return conversation_chain
|
39 |
|
40 |
+
def handle_input(user_input):
    """Send the user's question to the active conversation chain and render the reply.

    Args:
        user_input: the question text typed by the user.
    """
    # Guard: main() initializes st.session_state.conversation to None and the
    # chain is only created after documents are processed in the sidebar, so a
    # question typed before processing would call None and raise TypeError.
    if st.session_state.conversation is None:
        st.warning("Please upload and process a document first.")
        return
    response = st.session_state.conversation({"question": user_input})
    st.write(response)
|
43 |
|
44 |
|
45 |
def main():
|
46 |
load_dotenv()
|
47 |
st.set_page_config(page_title="Reads your html",page_icon=":books:")
|
48 |
+
|
49 |
+
if "conversation" not in st.session_state:
|
50 |
+
st.session_state.conversation = None
|
51 |
st.header("Get your best Element")
|
52 |
+
user_input = st.text_input("Pass your Element with its information")
|
53 |
+
if user_input:
|
54 |
+
handle_input(user_input)
|
55 |
|
56 |
with st.sidebar:
|
57 |
st.subheader("your html")
|
|
|
68 |
|
69 |
|
70 |
#create vector store
|
71 |
+
vector_store = get_vector_store(text_chunks)
|
72 |
+
|
73 |
+
#create conversation chain
|
74 |
+
st.session_state.conversation = get_conversation_chain(vector_store)
|
75 |
+
|
76 |
+
|
77 |
|
78 |
# Script entry point: launch the Streamlit app when executed directly
# (e.g. via `streamlit run app.py`), not when imported as a module.
if __name__ == '__main__':
    main()
|