Asankhaya Sharma committed · Commit 1ca7761
1 Parent(s): c23d6d1
fix issues
main.py CHANGED
@@ -8,9 +8,6 @@ from langchain.embeddings import HuggingFaceInferenceAPIEmbeddings
 from langchain.vectorstores import SupabaseVectorStore
 from supabase import Client, create_client
 from stats import add_usage
-from langchain.llms import HuggingFaceEndpoint
-from langchain.chains import ConversationalRetrievalChain
-from langchain.memory import ConversationBufferMemory
 
 supabase_url = st.secrets.SUPABASE_URL
 supabase_key = st.secrets.SUPABASE_KEY
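Note on the hunk below: the three imports dropped above serve only the conversational-retrieval block that the next hunk deletes, and twelve of those removed lines (old lines 42-53) were not captured in this view. The following is a minimal sketch of how that block plausibly fit together, reconstructed from the surviving fragments; the variables endpoint_url, hf_api_key, model_kwargs, query, vector_store, and username come from the removed lines themselves, while the surrounding structure is assumed, not the author's exact code:

# Sketch only - a plausible reconstruction of the removed retrieval flow.
from langchain.llms import HuggingFaceEndpoint
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory

hf = HuggingFaceEndpoint(                 # the opening of this call sat in the missing lines
    endpoint_url=endpoint_url,            # inference endpoint URL
    task="text-generation",
    huggingfacehub_api_token=hf_api_key,  # Hugging Face API token
    model_kwargs=model_kwargs,            # e.g. temperature / max token settings
)
memory = ConversationBufferMemory(
    memory_key="chat_history", input_key="question",
    output_key="answer", return_messages=True,
)
qa = ConversationalRetrievalChain.from_llm(
    hf,
    retriever=vector_store.as_retriever(
        search_kwargs={"score_threshold": 0.8, "k": 4, "filter": {"user": username}}
    ),
    memory=memory,
    return_source_documents=True,
)
model_response = qa({"question": query})  # returns the answer plus source documents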
@@ -38,72 +35,42 @@ if openai_api_key:
 if anthropic_api_key:
     models += ["claude-v1", "claude-v1.3",
                "claude-instant-v1-100k", "claude-instant-v1.1-100k"]
+
 
-        [old lines 42-53: removed lines whose content is not shown in this view]
-            endpoint_url=endpoint_url,
-            task="text-generation",
-            huggingfacehub_api_token=hf_api_key,
-            model_kwargs=model_kwargs
-        )
-        memory = ConversationBufferMemory(memory_key="chat_history", input_key='question', output_key='answer', return_messages=True)
-        qa = ConversationalRetrievalChain.from_llm(hf, retriever=vector_store.as_retriever(search_kwargs={"score_threshold": 0.8, "k": 4, "filter": {"user": username}}), memory=memory, return_source_documents=True)
-        model_response = qa({"question": query})
-        # print( model_response["answer"])
-        sources = model_response["source_documents"]
-        # print(sources)
-        if len(sources) > 0:
-            json = {"response": model_response["answer"]}
-            st.code(json, language="json")
-        else:
-            json = {"response": "I am sorry, I do not have enough information to provide an answer. If there is a public source of data that you would like to add, please email [email protected]."}
-            st.code(json, language="json")
-        memory.clear()
-else:
-    # Set the theme
-    st.set_page_config(
-        page_title="Securade.ai - Safety Copilot",
-        page_icon="https://securade.ai/favicon.ico",
-        layout="centered",
-        initial_sidebar_state="collapsed",
-        menu_items={
-            "About": "# Securade.ai Safety Copilot v0.1\n [https://securade.ai](https://securade.ai)",
-            "Get Help" : "https://securade.ai",
-            "Report a Bug": "mailto:[email protected]"
-        }
-    )
-    [old lines 86-109: removed lines not shown in this view; they appear to be indented versions of the lines added below]
+# Set the theme
+st.set_page_config(
+    page_title="Securade.ai - Safety Copilot",
+    page_icon="https://securade.ai/favicon.ico",
+    layout="centered",
+    initial_sidebar_state="collapsed",
+    menu_items={
+        "About": "# Securade.ai Safety Copilot v0.1\n [https://securade.ai](https://securade.ai)",
+        "Get Help" : "https://securade.ai",
+        "Report a Bug": "mailto:[email protected]"
+    }
+)
 
+st.title("👷‍♂️ Safety Copilot 🦺")
 
+st.markdown("Chat with your personal safety assistant about any health & safety related queries.")
+st.markdown("Up-to-date with latest OSH regulations for Singapore, Indonesia, Malaysia & other parts of Asia.")
 
+st.markdown("---\n\n")
 
+# Initialize session state variables
+if 'model' not in st.session_state:
+    st.session_state['model'] = "meta-llama/Llama-2-70b-chat-hf"
+if 'temperature' not in st.session_state:
+    st.session_state['temperature'] = 0.1
+if 'chunk_size' not in st.session_state:
+    st.session_state['chunk_size'] = 500
+if 'chunk_overlap' not in st.session_state:
+    st.session_state['chunk_overlap'] = 0
+if 'max_tokens' not in st.session_state:
+    st.session_state['max_tokens'] = 500
+if 'username' not in st.session_state:
+    st.session_state['username'] = username
 
+chat_with_doc(st.session_state['model'], vector_store, stats_db=supabase)
 
+st.markdown("---\n\n")
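The commit message only says "fix issues", but the shape of the change matches a known Streamlit constraint: st.set_page_config() must be the first Streamlit command executed in a script, and calling it after any other st.* call raises a StreamlitAPIException. The old code configured the page only inside the else branch, after other Streamlit calls had already run. A minimal sketch of the ordering rule the new top-level layout follows (page values abbreviated from the diff):

import streamlit as st

# set_page_config must come before any other st.* command,
# which is why the new version hoists it to module top level.
st.set_page_config(
    page_title="Securade.ai - Safety Copilot",
    layout="centered",
)

st.title("👷‍♂️ Safety Copilot 🦺")  # safe: runs after set_page_config

# st.session_state persists across reruns, so defaults are set only once.
if "model" not in st.session_state:
    st.session_state["model"] = "meta-llama/Llama-2-70b-chat-hf"

The session-state guards in the diff follow the same pattern: each default is written only when its key is absent, so user selections made in earlier reruns are not clobbered when the script re-executes.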