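# RAG chatbot for NoQs Digital: loads a local knowledge base, embeds it into a
# Pinecone index, and answers questions with Groq's Llama 3 behind a Gradio chat UI.
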
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_groq import ChatGroq
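# Imported as a whole module: langchain's vector store class is also named
# "Pinecone" and would clash with the pinecone client class imported below.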
import langchain_community.vectorstores
from pinecone import Pinecone, ServerlessSpec
from dotenv import load_dotenv
import os
from langchain_core.prompts import PromptTemplate
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
import gradio as gr
class ChatBot():
    # Load environment variables (Pinecone and Groq API keys) from a .env file
    load_dotenv()

    # loader = DirectoryLoader('data', glob="*.md")
    loader = TextLoader('data.txt', encoding='UTF-8')
    documents = loader.load()

    # Split the knowledge base into overlapping chunks for retrieval
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=600)
    docs = text_splitter.split_documents(documents)
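    # HuggingFaceEmbeddings defaults to sentence-transformers/all-mpnet-base-v2,
    # which produces 768-dimensional vectors; the index dimension below must match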
    embeddings = HuggingFaceEmbeddings()
    # Initialize Pinecone client
    pc = Pinecone(
        api_key=os.environ.get("PINECONE_API_KEY")
    )

    # Define index name
    index_name = "noqs-chatbot"

    # Check whether the index already exists
    if index_name not in pc.list_indexes().names():
        # Create a new index
        pc.create_index(
            name=index_name,
            metric="cosine",
            dimension=768,
            spec=ServerlessSpec(
                cloud="aws",
                region="us-east-1"
            )
        )
        # Embed the chunks and upsert them into the new index
        docsearch = langchain_community.vectorstores.Pinecone.from_documents(docs, embeddings, index_name=index_name)
    else:
        # Link to the existing index
        docsearch = langchain_community.vectorstores.Pinecone.from_existing_index(index_name, embeddings)
    # Connect to a Groq-hosted model (Llama 3 8B); ChatGroq reads
    # GROQ_API_KEY from the environment loaded above
    model_id = "llama3-8b-8192"
    llm = ChatGroq(
        model=model_id,
        temperature=0.1,
        max_tokens=600
    )
    # Prompt template for grounded, customer-care-style answers
    template = """
    You are a knowledgeable assistant for NoQs Digital. The Manager of our company is Mr. Adit Agarwal. Users will ask you questions about our company, and you must use the given context to answer their questions accurately. Follow these guidelines:
    - Always base your answers on the provided context. Do not make up information.
    - If the context does not contain the answer, simply say, "I don't know based on the provided information."
    - Offer detailed and thorough responses, but stay relevant to the user's question.
    - Maintain a professional tone in your responses.
    - Answer like a customer care specialist.

    Context: {context}

    User Question: {question}

    Answer:
    """
    prompt = PromptTemplate(
        template=template,
        input_variables=["context", "question"]
    )

    # Chain retriever, prompt, and LLM: the retriever fills {context} with
    # relevant chunks while RunnablePassthrough forwards the raw user
    # question into {question}
    rag_chain = (
        {"context": docsearch.as_retriever(), "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
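
# Note: the class-level statements above run once, when the class body is first
# executed; ChatBot() then creates an instance sharing those class attributes.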
bot = ChatBot()
def chat_function(prompt, history):
    # Gradio's ChatInterface passes (message, history); history is unused here
    # because the retrieval chain answers each question independently
    user_input = prompt
    result = bot.rag_chain.invoke(user_input)
    return result
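# Quick sanity check without the UI (hypothetical example; requires valid API keys):
#   print(chat_function("What services do you offer?", []))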
iface = gr.ChatInterface(
    fn=chat_function,
    textbox=gr.Textbox(placeholder="Enter message here", container=False, scale=7),
    chatbot=gr.Chatbot(height=400),
    title="NoQs Chatbot",
    examples=[
        ["Hello, what are the internship opportunities at NoQs?"],
        ["What services do you offer?"],
        ["How do I contact you?"],
        ["Can I schedule a meet with Mr. Adit?"],
        ["Can you provide me the Internship Repository and other important links?"],
        ["Tell me the difference between your programs"]
    ]
)
# share=True serves a temporary public Gradio link in addition to the local server
iface.launch(share=True)