Spaces:
Sleeping
Sleeping
File size: 5,620 Bytes
30ab543 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 |
import logging
from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
from langchain_huggingface import HuggingFaceEmbeddings
from sentence_transformers import SentenceTransformer
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain.prompts import PromptTemplate
from langchain_together import Together
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
import streamlit as st
import os
from dotenv import load_dotenv
import warnings
# --- Logging, warning filters, and environment configuration -----------------
logging.basicConfig(level=logging.DEBUG) # Logs at DEBUG level and above
logger = logging.getLogger(__name__)
logger.debug("Starting Streamlit app...")
# Suppress PyTorch FutureWarning
# NOTE: message-based filters match the exact warning text emitted by torch;
# if a library rewords its warning, the filter silently stops matching.
warnings.filterwarnings("ignore", message="You are using `torch.load` with `weights_only=False`")
warnings.filterwarnings("ignore", message="Tried to instantiate class '__path__._path'")
warnings.filterwarnings("ignore", category=FutureWarning)
# Suppress generic DeprecationWarnings (including LangChain)
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Load secrets from a local .env file; TOGETHER_AI must hold the Together API key.
load_dotenv()
TOGETHER_AI_API = os.getenv("TOGETHER_AI")  # None if the variable is unset
# Streamlit Page Config
st.set_page_config(page_title="Law4her")
# Center the logo by placing it in the wide middle column of a 1:4:1 layout.
col1, col2, col3 = st.columns([1, 4, 1])
with col2:
    st.image(
        "https://res.cloudinary.com/dzzhbgbnp/image/upload/v1736073326/lawforher_logo1_yznqxr.png"
    )
# Inject CSS that restyles buttons and hides Streamlit chrome (status widget,
# main menu, deploy button, footer, fullscreen control).
st.markdown(
    """
<style>
div.stButton > button:first-child {
background-color: #ffffff; /* White background */
color: #000000; /* Black text */
border: 1px solid #000000; /* Optional: Add a black border */
}
div.stButton > button:active {
background-color: #e0e0e0; /* Slightly darker white for active state */
color: #000000; /* Black text remains the same */
}
div[data-testid="stStatusWidget"] div button {
display: none;
}
.reportview-container {
margin-top: -2em;
}
#MainMenu {visibility: hidden;}
.stDeployButton {display:none;}
footer {visibility: hidden;}
#stDecoration {display:none;}
button[title="View fullscreen"]{
visibility: hidden;}
</style>
""",
    unsafe_allow_html=True,
)
# Reset Conversation
def reset_conversation():
    """Restore the chat to a fresh state.

    Re-seeds the visible history with the assistant greeting and wipes the
    conversation memory shared with the retrieval chain.
    """
    greeting = {"role": "assistant", "content": "Hi, how can I help you?"}
    st.session_state.messages = [greeting]
    st.session_state.memory.clear()
# Initialize chat messages and memory
# Streamlit reruns the whole script on every interaction, so chat state must
# live in st.session_state to survive reruns; only seed it on first run.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "Hi, how can I help you?"}]
if "memory" not in st.session_state:
    # memory_key="chat_history" matches the {chat_history} slot in the prompt;
    # return_messages=True stores structured messages instead of a flat string.
    st.session_state.memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True
    )
# Load embeddings and vectorstore
embeddings = HuggingFaceEmbeddings(
    model_name="nomic-ai/nomic-embed-text-v1",
    # Pin the model revision so upstream remote-code changes cannot silently
    # alter embeddings; this model requires trust_remote_code to load.
    model_kwargs={"trust_remote_code": True, "revision": "289f532e14dbbbd5a04753fa58739e9ba766f3c7"},
)
# Enable dangerous deserialization (safe only if the file is trusted and created by you)
# SECURITY: load_local unpickles the stored index — never point this at an
# untrusted file.
db = FAISS.load_local("ipc_vector_db", embeddings, allow_dangerous_deserialization=True)
# Top-2 similarity retrieval. NOTE(review): "max_length" is not a standard
# FAISS retriever search kwarg — presumably meant to cap passage length;
# verify it is actually honored.
db_retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": 2, "max_length": 512})
# Prompt for Mistral-7B-Instruct. Mistral's chat format wraps the instruction
# in <s>[INST] ... [/INST]; the model generates after [/INST].
# FIX: the template previously ended with "</s>[INST]", which never closes the
# instruction block and instead opens a dangling one — replaced with the
# correct "[/INST]" terminator. All other text is unchanged.
prompt_template = """<s>[INST]As a legal chatbot specializing in the Indian Penal Code, provide a concise and accurate answer based on the given context. Avoid unnecessary details or unrelated content. Only respond if the answer can be derived from the provided context; otherwise, say "The information is not available in the provided context."
CONTEXT: {context}
CHAT HISTORY: {chat_history}
QUESTION: {question}
ANSWER:
[/INST]
"""
# Variables are filled by ConversationalRetrievalChain: context from the
# retriever, chat_history from memory, question from the user.
prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question", "chat_history"])
# Initialize the Together API
llm = Together(
    model="mistralai/Mistral-7B-Instruct-v0.2",
    temperature=0.5,  # moderate sampling randomness
    max_tokens=1024,  # cap on generated tokens per answer
    together_api_key=TOGETHER_AI_API,
)
# Retrieval-augmented conversational chain: retrieves top-k passages, fills
# the prompt with context + chat history, and queries the Together-hosted LLM.
qa = ConversationalRetrievalChain.from_llm(
    llm=llm,
    memory=st.session_state.memory,  # shared with the UI so Reset clears it too
    retriever=db_retriever,
    combine_docs_chain_kwargs={"prompt": prompt},
)
# --- Chat UI -----------------------------------------------------------------
# Replay the conversation so far (Streamlit reruns the script on every event).
for message in st.session_state.messages:
    with st.chat_message(message.get("role")):
        st.write(message.get("content"))

# User input
input_prompt = st.chat_input("Ask a legal question about the Indian Penal Code")
if input_prompt:
    # Echo the user's question and record it in the visible history.
    with st.chat_message("user"):
        st.write(input_prompt)
    st.session_state.messages.append({"role": "user", "content": input_prompt})

    with st.chat_message("assistant"):
        # FIX: the status label was mojibake ("Thinking π‘...") from a bad
        # encoding round-trip; assumed the intended character was 💡.
        with st.status("Thinking 💡...", expanded=True):
            try:
                # Pass the user question through the retrieval chain; chat
                # history is pulled from st.session_state.memory by the chain.
                result = qa.invoke(input=input_prompt)
                full_response = result.get("answer", "")
                # Normalize the answer to a plain string before display.
                if isinstance(full_response, list):
                    full_response = " ".join(full_response)
                elif not isinstance(full_response, str):
                    full_response = str(full_response)
                # Record and display the response.
                st.session_state.messages.append({"role": "assistant", "content": full_response})
                st.write(full_response)
            except Exception as e:
                # Surface the failure in the UI rather than crashing the app.
                st.error(f"Error occurred: {e}")

# Add reset button — clears both the visible history and the chain memory.
# FIX: button label was also mojibake ("Reset All Chat π"); assumed 🔄.
st.button("Reset All Chat 🔄", on_click=reset_conversation)
|