satvikjain committed on
Commit
4a92a4e
·
verified ·
1 Parent(s): d3b0d57

updated gradio interface

Browse files
Files changed (1) hide show
  1. app.py +115 -115
app.py CHANGED
@@ -1,115 +1,115 @@
1
-
2
- from langchain.text_splitter import CharacterTextSplitter
3
- from langchain_community.document_loaders import TextLoader
4
- from langchain_huggingface import HuggingFaceEmbeddings
5
- from langchain_groq import ChatGroq
6
- import langchain_community.vectorstores
7
- from pinecone import Pinecone, ServerlessSpec
8
- from dotenv import load_dotenv
9
- import os
10
- from langchain_core.prompts import PromptTemplate
11
- from langchain.schema.runnable import RunnablePassthrough
12
- from langchain.schema.output_parser import StrOutputParser
13
- import gradio as gr
14
-
15
- class ChatBot():
16
- load_dotenv()
17
- # loader = DirectoryLoader('data', glob="*.md")
18
- loader = TextLoader('data.txt', encoding = 'UTF-8')
19
- documents = loader.load()
20
- text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=600)
21
- docs = text_splitter.split_documents(documents)
22
-
23
- embeddings = HuggingFaceEmbeddings()
24
-
25
- load_dotenv()
26
- # Initialize Pinecone client
27
- pc = Pinecone(
28
- api_key=os.environ.get("PINECONE_API_KEY")
29
- )
30
-
31
- # Define Index Name
32
- index_name = "noqs-chatbot"
33
-
34
- # Checking Index
35
- if index_name not in pc.list_indexes().names():
36
- # Create new Index
37
- pc.create_index(name=index_name,
38
- metric="cosine",
39
- dimension=768,
40
- spec=ServerlessSpec(
41
- cloud="aws",
42
- region="us-east-1"
43
- ))
44
-
45
- docsearch = langchain_community.vectorstores.Pinecone.from_documents(docs, embeddings, index_name=index_name)
46
- else:
47
- # Link to the existing index
48
- docsearch = langchain_community.vectorstores.Pinecone.from_existing_index(index_name, embeddings)
49
-
50
- # Define the repo ID and connect to a model on Groq API
51
- model_id = "llama3-8b-8192"
52
- llm = ChatGroq(
53
- model=model_id,
54
- temperature=0.1,
55
- max_tokens=600
56
- )
57
-
58
- # Creating prompt response template
59
- template = """
60
- You are a knowledgeable assistant for NoQs Digital. The Manager of our company is Mr. Adit Agarwal. Users will ask you questions about our company, and you must use the given context to answer their questions accurately. Follow these guidelines:
61
- Always base your answers on the provided context. Do not make up information.
62
- If the context does not contain the answer, simply say, "I don't know based on the provided information."
63
- Offer detailed and thorough responses, but stay relevant to the user's question.
64
- Maintain a professional tone in your responses.
65
- You have to answer like a Customer Care Specialist.
66
-
67
- Context: {context}
68
-
69
- User Question: {question}
70
-
71
- Answer:
72
- """
73
-
74
-
75
- prompt = PromptTemplate(
76
- template=template,
77
- input_variables=["context", "question"]
78
- )
79
-
80
- # Chaining llm and prompt
81
- rag_chain = (
82
- {"context": docsearch.as_retriever(), "question": RunnablePassthrough()}
83
- | prompt
84
- | llm
85
- | StrOutputParser()
86
- )
87
-
88
- bot = ChatBot()
89
-
90
- # def chat_function(prompt):
91
- # user_input = prompt
92
- # result = bot.rag_chain.invoke(user_input)
93
- # return result
94
-
95
- def chat_function(prompts,history):
96
- user_input = prompts
97
- result = bot.rag_chain.invoke(user_input)
98
- return result
99
-
100
-
101
- iface = gr.ChatInterface(
102
- fn=chat_function,
103
- textbox = gr.Textbox(placeholder="Enter message here", container=False, scale = 7),
104
- chatbot= gr.Chatbot(height=400),
105
- title="NoQs Chatbot",
106
- examples=[
107
- ["Hello, What are the Internship oportunities at NoQs"],
108
- ["What services do you offer?"],
109
- ["How do I contact you?"],
110
- ["Can I schedule a meet with Mr. Adit?"],
111
- ["Can you provide me the Internship Repository and other important links"],
112
- ["Tell me the difference between your programs"]
113
- ]
114
- )
115
- iface.launch(share = True)
 
1
+
2
+ from langchain.text_splitter import CharacterTextSplitter
3
+ from langchain_community.document_loaders import TextLoader
4
+ from langchain_huggingface import HuggingFaceEmbeddings
5
+ from langchain_groq import ChatGroq
6
+ import langchain_community.vectorstores
7
+ from pinecone import Pinecone, ServerlessSpec
8
+ from dotenv import load_dotenv
9
+ import os
10
+ from langchain_core.prompts import PromptTemplate
11
+ from langchain.schema.runnable import RunnablePassthrough
12
+ from langchain.schema.output_parser import StrOutputParser
13
+ import gradio as gr
14
+
15
+ class ChatBot():
16
+ load_dotenv()
17
+ # loader = DirectoryLoader('data', glob="*.md")
18
+ loader = TextLoader('data.txt', encoding = 'UTF-8')
19
+ documents = loader.load()
20
+ text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=600)
21
+ docs = text_splitter.split_documents(documents)
22
+
23
+ embeddings = HuggingFaceEmbeddings()
24
+
25
+ load_dotenv()
26
+ # Initialize Pinecone client
27
+ pc = Pinecone(
28
+ api_key=os.environ.get("PINECONE_API_KEY")
29
+ )
30
+
31
+ # Define Index Name
32
+ index_name = "noqs-chatbot"
33
+
34
+ # Checking Index
35
+ if index_name not in pc.list_indexes().names():
36
+ # Create new Index
37
+ pc.create_index(name=index_name,
38
+ metric="cosine",
39
+ dimension=768,
40
+ spec=ServerlessSpec(
41
+ cloud="aws",
42
+ region="us-east-1"
43
+ ))
44
+
45
+ docsearch = langchain_community.vectorstores.Pinecone.from_documents(docs, embeddings, index_name=index_name)
46
+ else:
47
+ # Link to the existing index
48
+ docsearch = langchain_community.vectorstores.Pinecone.from_existing_index(index_name, embeddings)
49
+
50
+ # Define the repo ID and connect to a model on Groq API
51
+ model_id = "llama3-8b-8192"
52
+ llm = ChatGroq(
53
+ model=model_id,
54
+ temperature=0.5,
55
+ max_tokens=600
56
+ )
57
+
58
+ # Creating prompt response template
59
+ template = """
60
+ You are a knowledgeable assistant for NoQs Digital. The Manager of our company is Mr. Adit Agarwal. Users will ask you questions about our company, and you must use the given context to answer their questions accurately. Follow these guidelines:
61
+ Always base your answers on the provided context. Do not make up information.
62
+ If the context does not contain the answer, simply say, "I don't know based on the provided information."
63
+ Offer detailed and thorough responses, but stay relevant to the user's question.
64
+ Maintain a professional tone in your responses.
65
+ You have to answer like a Customer Care Specialist.
66
+ If someone answer about program A just answer politelly that due to high demand we are currently not offering program A.
67
+
68
+ Context: {context}
69
+
70
+ User Question: {question}
71
+
72
+ Answer:
73
+ """
74
+
75
+
76
+ prompt = PromptTemplate(
77
+ template=template,
78
+ input_variables=["context", "question"]
79
+ )
80
+
81
+ # Chaining llm and prompt
82
+ rag_chain = (
83
+ {"context": docsearch.as_retriever(), "question": RunnablePassthrough()}
84
+ | prompt
85
+ | llm
86
+ | StrOutputParser()
87
+ )
88
+
89
bot = ChatBot()


def chat_function(prompts, history):
    """Gradio ChatInterface callback.

    Parameters:
        prompts: the user's latest message (str).
        history: prior chat turns supplied by Gradio — unused here, since
            the RAG chain retrieves its own context per question.

    Returns:
        The chain's answer as a plain string.
    """
    # (A dead, commented-out single-argument variant of this function was
    # removed; ChatInterface requires the (message, history) signature.)
    return bot.rag_chain.invoke(prompts)
100
+
101
+
102
# Build and launch the Gradio chat UI.
iface = gr.ChatInterface(
    fn=chat_function,
    textbox=gr.Textbox(placeholder="Enter message here", container=False, scale=7),
    chatbot=gr.Chatbot(height=400),
    title="NoQs Chatbot",
    examples=[
        # Typo fixed: "oportunities" -> "opportunities".
        ["Hello, What are the Internship opportunities at NoQs"],
        ["How do I contact you?"],
        ["Can I schedule a meet with Mr. Adit?"],
        ["Can you provide me the Internship Repository and other important links"],
        ["Tell me the difference between your programs"],
    ],
)
# share=True additionally exposes a temporary public Gradio link.
iface.launch(share=True)