import streamlit as st
import langchain_core
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
# from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import LlamaCpp

# Alternative back-ends, unused in the active code path:
# from langchain_community.embeddings import HuggingFaceBgeEmbeddings
# from langchain_community.llms import CTransformers, HuggingFaceHub, HuggingFacePipeline
# from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# from dotenv import load_dotenv
# load_dotenv()
def get_vector_store_from_url(url):
    """Scrape a web page, split it into chunks, and index them in Chroma."""
    # Alternative embedding model, kept for reference:
    # embeddings = HuggingFaceBgeEmbeddings(
    #     model_name="BAAI/bge-large-en",
    #     model_kwargs={'device': 'cpu'},
    #     encode_kwargs={'normalize_embeddings': False},
    # )
    embeddings = HuggingFaceEmbeddings(model_name='thenlper/gte-large',
                                       model_kwargs={'device': 'cpu'})
    loader = WebBaseLoader(url)
    document = loader.load()

    # Split the document into chunks
    text_splitter = RecursiveCharacterTextSplitter()
    document_chunks = text_splitter.split_documents(document)

    # Create a vector store from the chunks
    # vector_store = Chroma.from_documents(document_chunks, OpenAIEmbeddings())
    vector_store = Chroma.from_documents(document_chunks, embeddings)
    return vector_store
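
# RecursiveCharacterTextSplitter() above runs with library defaults; chunk size
# and overlap are tunable. A possible explicit configuration (the values here
# are illustrative, not taken from the original):
# text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)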
def get_context_retriever_chain(vector_store, llm):
    """Build a retriever that reformulates the query using the chat history."""
    retriever = vector_store.as_retriever()
    prompt = ChatPromptTemplate.from_messages([
        MessagesPlaceholder(variable_name="chat_history"),
        ("user", "{input}"),
        ("user", "Given the above conversation, generate a search query to look up in order to get information relevant to the conversation"),
    ])
    retriever_chain = create_history_aware_retriever(llm, retriever, prompt)
    return retriever_chain
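
# When chat history is present, create_history_aware_retriever first asks the
# LLM to rewrite the latest question into a standalone search query, then runs
# that query against the vector-store retriever; with no history it queries
# the retriever directly.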
def get_conversational_rag_chain(retriever_chain, llm):
    """Combine the retriever with a stuff-documents chain that answers from context."""
    if not retriever_chain:
        raise ValueError("`retriever_chain` cannot be None or an empty object.")
    template = "Answer the user's questions based on the below context:\n\n{context}"
    human_template = "{input}"
    prompt = ChatPromptTemplate.from_messages([
        ("system", template),
        MessagesPlaceholder(variable_name="chat_history"),
        ("user", human_template),
    ])

    def safe_llm(input_str: str) -> str:
        # LlamaCpp expects a plain string, but the chain hands it a
        # ChatPromptValue; render it to text before calling the model.
        if isinstance(input_str, langchain_core.prompts.chat.ChatPromptValue):
            input_str = input_str.to_string()
        return llm(input_str)

    stuff_documents_chain = create_stuff_documents_chain(safe_llm, prompt)
    return create_retrieval_chain(retriever_chain, stuff_documents_chain)
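
# The combined chain is invoked with a dict of the form
#   {"chat_history": [...messages...], "input": "latest question"}
# and returns a dict whose "answer" key holds the generated reply
# (see get_response below).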
def get_response(user_input):
    """Answer a user message with retrieval-augmented generation over the indexed site."""
    # Alternative local LLM back-ends, kept for reference:
    # llm = CTransformers(
    #     # model="TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
    #     model="TheBloke/Llama-2-7B-Chat-GGUF",
    #     model_file="llama-2-7b-chat.Q3_K_S.gguf",
    #     model_type="llama",
    #     max_new_tokens=300,
    #     temperature=0.3,
    #     lib="avx2",  # for CPU
    # )
    # llm = HuggingFaceHub(
    #     repo_id="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    #     model_kwargs={"temperature": 0.3, "max_new_tokens": 250, "top_k": 3},
    # )
    # llm = HuggingFacePipeline.from_model_id(
    #     model_id="google/flan-t5-base",  # also tried: google-t5/t5-small,
    #     task="text2text-generation",     # lmsys/fastchat-t5-3b-v1.0
    # )
    # llm = pipeline(task="conversational", model="facebook/blenderbot-400M-distill")
    llm = LlamaCpp(
        model_path="tinyllama-1.1b-chat-v1.0.Q4_0.gguf",
        temperature=0.75,
        max_tokens=500,
        top_p=1,
        # verbose=True,  # required when passing a callback_manager
    )
    retriever_chain = get_context_retriever_chain(st.session_state.vector_store, llm)
    conversation_rag_chain = get_conversational_rag_chain(retriever_chain, llm)
    response = conversation_rag_chain.invoke({
        "chat_history": st.session_state.chat_history,
        "input": user_input,
    })
    return response['answer']
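
# Note: get_response reloads the GGUF model and rebuilds both chains on every
# message. A possible optimization (a sketch, not wired into the app) is to
# cache the model across Streamlit reruns:
# @st.cache_resource
# def load_llm():
#     return LlamaCpp(model_path="tinyllama-1.1b-chat-v1.0.Q4_0.gguf",
#                     temperature=0.75, max_tokens=500, top_p=1)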
# App config
st.set_page_config(page_title="Chat with Websites", page_icon="🤖")
st.title("Chat with Websites")
# Sidebar
with st.sidebar:
    st.header("Settings")
    website_url = st.text_input("Website URL")
    # openai_apikey = st.text_input("Enter your OpenAI API key")

if website_url is None or website_url == "":
    st.info("Please enter a website URL")
else:
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = [
            AIMessage(content="Hello, I am a bot. How can I help you?"),
        ]
    if "vector_store" not in st.session_state:
        st.session_state.vector_store = get_vector_store_from_url(website_url)
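    # st.session_state persists across Streamlit reruns, so the page is
    # scraped and embedded only once per session rather than on every message.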
    # User input
    user_query = st.chat_input("Type your message here...")
    if user_query is not None and user_query != "":
        response = get_response(user_query)
        st.session_state.chat_history.append(HumanMessage(content=user_query))
        st.session_state.chat_history.append(AIMessage(content=response))
    # Conversation: render each stored message under the matching avatar
    for message in st.session_state.chat_history:
        if isinstance(message, AIMessage):
            with st.chat_message("AI"):
                st.write(message.content)
        elif isinstance(message, HumanMessage):
            with st.chat_message("Human"):
                st.write(message.content)