Spaces:
Sleeping
Sleeping
updated gradio interface
Browse files
app.py
CHANGED
@@ -1,115 +1,115 @@
|
|
1 |
-
|
2 |
-
from langchain.text_splitter import CharacterTextSplitter
|
3 |
-
from langchain_community.document_loaders import TextLoader
|
4 |
-
from langchain_huggingface import HuggingFaceEmbeddings
|
5 |
-
from langchain_groq import ChatGroq
|
6 |
-
import langchain_community.vectorstores
|
7 |
-
from pinecone import Pinecone, ServerlessSpec
|
8 |
-
from dotenv import load_dotenv
|
9 |
-
import os
|
10 |
-
from langchain_core.prompts import PromptTemplate
|
11 |
-
from langchain.schema.runnable import RunnablePassthrough
|
12 |
-
from langchain.schema.output_parser import StrOutputParser
|
13 |
-
import gradio as gr
|
14 |
-
|
15 |
-
class ChatBot():
|
16 |
-
load_dotenv()
|
17 |
-
# loader = DirectoryLoader('data', glob="*.md")
|
18 |
-
loader = TextLoader('data.txt', encoding = 'UTF-8')
|
19 |
-
documents = loader.load()
|
20 |
-
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=600)
|
21 |
-
docs = text_splitter.split_documents(documents)
|
22 |
-
|
23 |
-
embeddings = HuggingFaceEmbeddings()
|
24 |
-
|
25 |
-
load_dotenv()
|
26 |
-
# Initialize Pinecone client
|
27 |
-
pc = Pinecone(
|
28 |
-
api_key=os.environ.get("PINECONE_API_KEY")
|
29 |
-
)
|
30 |
-
|
31 |
-
# Define Index Name
|
32 |
-
index_name = "noqs-chatbot"
|
33 |
-
|
34 |
-
# Checking Index
|
35 |
-
if index_name not in pc.list_indexes().names():
|
36 |
-
# Create new Index
|
37 |
-
pc.create_index(name=index_name,
|
38 |
-
metric="cosine",
|
39 |
-
dimension=768,
|
40 |
-
spec=ServerlessSpec(
|
41 |
-
cloud="aws",
|
42 |
-
region="us-east-1"
|
43 |
-
))
|
44 |
-
|
45 |
-
docsearch = langchain_community.vectorstores.Pinecone.from_documents(docs, embeddings, index_name=index_name)
|
46 |
-
else:
|
47 |
-
# Link to the existing index
|
48 |
-
docsearch = langchain_community.vectorstores.Pinecone.from_existing_index(index_name, embeddings)
|
49 |
-
|
50 |
-
# Define the repo ID and connect to a model on Groq API
|
51 |
-
model_id = "llama3-8b-8192"
|
52 |
-
llm = ChatGroq(
|
53 |
-
model=model_id,
|
54 |
-
temperature=0.
|
55 |
-
max_tokens=600
|
56 |
-
)
|
57 |
-
|
58 |
-
# Creating prompt response template
|
59 |
-
template = """
|
60 |
-
You are a knowledgeable assistant for NoQs Digital. The Manager of our company is Mr. Adit Agarwal. Users will ask you questions about our company, and you must use the given context to answer their questions accurately. Follow these guidelines:
|
61 |
-
Always base your answers on the provided context. Do not make up information.
|
62 |
-
If the context does not contain the answer, simply say, "I don't know based on the provided information."
|
63 |
-
Offer detailed and thorough responses, but stay relevant to the user's question.
|
64 |
-
Maintain a professional tone in your responses.
|
65 |
-
You have to answer like a Customer Care Specialist.
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
|
85 |
-
|
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
#
|
92 |
-
#
|
93 |
-
#
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
["What
|
109 |
-
["How do I contact you?"],
|
110 |
-
["Can I schedule a meet with Mr. Adit?"],
|
111 |
-
["Can you provide me the Internship Repository and other important links"],
|
112 |
-
["Tell me the difference between your programs"]
|
113 |
-
]
|
114 |
-
)
|
115 |
-
iface.launch(share = True)
|
|
|
1 |
+
|
2 |
+
from langchain.text_splitter import CharacterTextSplitter
|
3 |
+
from langchain_community.document_loaders import TextLoader
|
4 |
+
from langchain_huggingface import HuggingFaceEmbeddings
|
5 |
+
from langchain_groq import ChatGroq
|
6 |
+
import langchain_community.vectorstores
|
7 |
+
from pinecone import Pinecone, ServerlessSpec
|
8 |
+
from dotenv import load_dotenv
|
9 |
+
import os
|
10 |
+
from langchain_core.prompts import PromptTemplate
|
11 |
+
from langchain.schema.runnable import RunnablePassthrough
|
12 |
+
from langchain.schema.output_parser import StrOutputParser
|
13 |
+
import gradio as gr
|
14 |
+
|
15 |
+
class ChatBot():
    """Builds the complete RAG pipeline for the NoQs Digital assistant.

    NOTE(review): everything below runs ONCE, when the `class` statement
    executes at import time — `docsearch`, `llm` and `rag_chain` are class
    attributes, not per-instance state. `ChatBot()` instances merely expose
    them (see `bot = ChatBot()` at module level).
    """
    # Load environment variables (PINECONE_API_KEY, GROQ_API_KEY, ...) from .env.
    # The previous version called load_dotenv() twice; once is enough.
    load_dotenv()

    # Load the knowledge base and split it into overlapping character chunks.
    # loader = DirectoryLoader('data', glob="*.md")
    loader = TextLoader('data.txt', encoding='UTF-8')
    documents = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=600)
    docs = text_splitter.split_documents(documents)

    # Default HuggingFace sentence-transformers embeddings
    # (assumed 768-dimensional — must match the index `dimension` below).
    embeddings = HuggingFaceEmbeddings()

    # Initialize Pinecone client
    pc = Pinecone(
        api_key=os.environ.get("PINECONE_API_KEY")
    )

    # Define Index Name
    index_name = "noqs-chatbot"

    # First run: create the serverless index and upsert the documents.
    # Later runs: attach to the already-populated index.
    if index_name not in pc.list_indexes().names():
        # Create new Index
        pc.create_index(name=index_name,
                        metric="cosine",
                        dimension=768,  # must equal the embedding vector size
                        spec=ServerlessSpec(
                            cloud="aws",
                            region="us-east-1"
                        ))

        docsearch = langchain_community.vectorstores.Pinecone.from_documents(docs, embeddings, index_name=index_name)
    else:
        # Link to the existing index
        docsearch = langchain_community.vectorstores.Pinecone.from_existing_index(index_name, embeddings)

    # Define the repo ID and connect to a model on Groq API
    model_id = "llama3-8b-8192"
    llm = ChatGroq(
        model=model_id,
        temperature=0.5,
        max_tokens=600
    )

    # Prompt template: {context} is filled by the retriever, {question} by the user.
    template = """
You are a knowledgeable assistant for NoQs Digital. The Manager of our company is Mr. Adit Agarwal. Users will ask you questions about our company, and you must use the given context to answer their questions accurately. Follow these guidelines:
Always base your answers on the provided context. Do not make up information.
If the context does not contain the answer, simply say, "I don't know based on the provided information."
Offer detailed and thorough responses, but stay relevant to the user's question.
Maintain a professional tone in your responses.
You have to answer like a Customer Care Specialist.
If someone asks about program A, politely answer that due to high demand we are currently not offering program A.

Context: {context}

User Question: {question}

Answer:
"""

    prompt = PromptTemplate(
        template=template,
        input_variables=["context", "question"]
    )

    # Chaining llm and prompt: retrieve context -> fill prompt -> Groq LLM -> plain string.
    rag_chain = (
        {"context": docsearch.as_retriever(), "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
|
88 |
+
|
89 |
+
# Instantiate once at import time; the heavy setup already ran when the class
# statement executed, so this object just exposes the class-level `rag_chain`.
bot = ChatBot()
|
90 |
+
|
91 |
+
def chat_function(prompts, history):
    """Gradio ChatInterface callback: answer one user message via the RAG chain.

    Args:
        prompts: the user's latest message (a string from the textbox).
        history: prior chat turns, required by gr.ChatInterface's callback
            signature but unused here — every question is answered statelessly.

    Returns:
        The chain's string answer (StrOutputParser output).
    """
    return bot.rag_chain.invoke(prompts)
|
100 |
+
|
101 |
+
|
102 |
+
# Gradio chat UI wired to the RAG chain via chat_function.
iface = gr.ChatInterface(
    fn=chat_function,
    textbox=gr.Textbox(placeholder="Enter message here", container=False, scale=7),
    chatbot=gr.Chatbot(height=400),
    title="NoQs Chatbot",
    examples=[
        # Fixed user-visible typo: "oportunities" -> "opportunities".
        ["Hello, What are the Internship opportunities at NoQs"],
        ["How do I contact you?"],
        ["Can I schedule a meet with Mr. Adit?"],
        ["Can you provide me the Internship Repository and other important links"],
        ["Tell me the difference between your programs"]
    ]
)

# share=True requests a public tunnel link — useful for local runs; on a
# hosted Hugging Face Space it is ignored (with a warning) and harmless.
iface.launch(share=True)
|