Gopikanth123 committed
Update main.py

main.py CHANGED
@@ -6,6 +6,7 @@ from llama_index.llms.huggingface import HuggingFaceInferenceAPI
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from huggingface_hub import InferenceClient
 from transformers import AutoTokenizer, AutoModel
+from deep_translator import GoogleTranslator
 
 
 # Ensure HF_TOKEN is set
@@ -66,14 +67,55 @@ def data_ingestion_from_directory():
     # Persist the new index
     index.storage_context.persist(persist_dir=PERSIST_DIR)
 
-def handle_query(query):
-    context_str = ""
+# def handle_query(query):
+#     context_str = ""
 
-    # Build context from current chat history
-    for past_query, response in reversed(current_chat_history):
-        if past_query.strip():
-            context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
+#     # Build context from current chat history
+#     for past_query, response in reversed(current_chat_history):
+#         if past_query.strip():
+#             context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
 
+#     chat_text_qa_msgs = [
+#         (
+#             "user",
+#             """
+#             You are the Taj Hotel voice chatbot and your name is Taj hotel helper. Your goal is to provide accurate, professional, and helpful answers to user queries based on the Taj hotel data. Always ensure your responses are clear and concise. Give response within 10-15 words only. You need to give an answer in the same language used by the user.
+#             {context_str}
+#             Question:
+#             {query_str}
+#             """
+#         )
+#     ]
+
+
+
+#     text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
+
+#     storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
+#     index = load_index_from_storage(storage_context)
+#     # context_str = ""
+
+#     # # Build context from current chat history
+#     # for past_query, response in reversed(current_chat_history):
+#     #     if past_query.strip():
+#     #         context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
+
+#     query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
+#     print(f"Querying: {query}")
+#     answer = query_engine.query(query)
+
+#     # Extracting the response
+#     if hasattr(answer, 'response'):
+#         response = answer.response
+#     elif isinstance(answer, dict) and 'response' in answer:
+#         response = answer['response']
+#     else:
+#         response = "I'm sorry, I couldn't find an answer to that."
+
+#     # Append to chat history
+#     current_chat_history.append((query, response))
+#     return response
+def handle_query(query):
     chat_text_qa_msgs = [
         (
             "user",
@@ -85,34 +127,26 @@ def handle_query(query):
             """
         )
     ]
-
-
-
-    text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
-
-    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
-    index = load_index_from_storage(storage_context)
-    # context_str = ""
+    text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
 
-    # # Build context from current chat history
-    # for past_query, response in reversed(current_chat_history):
-    #     if past_query.strip():
-    #         context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
-
-    query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
-    print(f"Querying: {query}")
-    answer = query_engine.query(query)
-
-    # Extracting the response
-    if hasattr(answer, 'response'):
-        response = answer.response
-    elif isinstance(answer, dict) and 'response' in answer:
-        response = answer['response']
-    else:
-        response = "I'm sorry, I couldn't find an answer to that."
-
-    # Append to chat history
-    current_chat_history.append((query, response))
+    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
+    index = load_index_from_storage(storage_context)
+    context_str = ""
+    for past_query, response in reversed(current_chat_history):
+        if past_query.strip():
+            context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
+
+    query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
+    print(query)
+    answer = query_engine.query(query)
+
+    if hasattr(answer, 'response'):
+        response = answer.response
+    elif isinstance(answer, dict) and 'response' in answer:
+        response = answer['response']
+    else:
+        response = "Sorry, I couldn't find an answer."
+    current_chat_history.append((query, response))
     return response
 
 app = Flask(__name__)
@@ -121,11 +155,35 @@ app = Flask(__name__)
 data_ingestion_from_directory()
 
 # Generate Response
-def generate_response(query):
+def generate_response(query,language):
     try:
         # Call the handle_query function to get the response
-        bot_response = handle_query(query)
-
+        bot_response = handle_query(query)
+        indian_languages = {
+            "hindi": "hi",
+            "bengali": "bn",
+            "telugu": "te",
+            "marathi": "mr",
+            "tamil": "ta",
+            "gujarati": "gu",
+            "kannada": "kn",
+            "malayalam": "ml",
+            "punjabi": "pa",
+            "odia": "or",
+            "urdu": "ur",
+            "assamese": "as",
+            "sanskrit": "sa"
+        }
+        translated_text = bot_response
+        try:
+            translated_text = GoogleTranslator(source='en', target=indian_languages[language]).translate(bot_response)
+            print(translated_text)
+        except Exception as e:
+            # Handle translation errors
+            print(f"Translation error: {e}")
+            translated_response = "Sorry, I couldn't translate the response."
+        chat_history.append((query,translated_text))
+        return translated_text
     except Exception as e:
         return f"Error fetching the response: {str(e)}"
 
@@ -138,11 +196,12 @@ def index():
 @app.route('/chat', methods=['POST'])
 def chat():
     try:
-        user_message = request.json.get("message")
+        user_message = request.json.get("message")
+        language = request.json.get("language")
         if not user_message:
             return jsonify({"response": "Please say something!"})
 
-        bot_response = generate_response(user_message)
+        bot_response = generate_response(user_message,language)
         return jsonify({"response": bot_response})
     except Exception as e:
         return jsonify({"response": f"An error occurred: {str(e)}"})
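A quick way to exercise the new language parameter is a smoke test against a running instance. The sketch below is illustrative, not part of the commit: the host and port are assumptions (adjust to wherever Flask is serving), while the "message" and "language" keys come straight from the updated chat() route.

import requests

# Hypothetical local URL; adjust to wherever the app is actually served.
URL = "http://127.0.0.1:5000/chat"

# Keys match what chat() reads via request.json.get(); "language" should be
# one of the indian_languages keys (e.g. "hindi") for translation to apply.
payload = {"message": "What time is check-in?", "language": "hindi"}

resp = requests.post(URL, json=payload)
print(resp.json()["response"])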
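Two details in the new generate_response() are worth flagging: indian_languages[language] raises KeyError whenever language is missing or unsupported (the inner except then silently keeps the English text, and the fallback string is assigned to an unused translated_response variable), and chat_history is not defined anywhere in this diff, while the surrounding code uses current_chat_history. A more defensive version of just the translation step might look like the following sketch; the helper name and fallback behaviour are illustrative, not the committed code.

from deep_translator import GoogleTranslator

INDIAN_LANGUAGES = {
    "hindi": "hi", "bengali": "bn", "telugu": "te", "marathi": "mr",
    "tamil": "ta", "gujarati": "gu", "kannada": "kn", "malayalam": "ml",
    "punjabi": "pa", "odia": "or", "urdu": "ur", "assamese": "as",
    "sanskrit": "sa",
}

def translate_response(bot_response, language):
    # Illustrative helper, not in the commit: .get() avoids the KeyError
    # that indian_languages[language] raises for None/unknown languages.
    target = INDIAN_LANGUAGES.get((language or "").lower())
    if target is None:
        return bot_response  # no or unsupported language: keep the English reply
    try:
        return GoogleTranslator(source="en", target=target).translate(bot_response)
    except Exception as exc:
        print(f"Translation error: {exc}")
        return "Sorry, I couldn't translate the response."  # actually returned here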
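For testing without a live server, Flask's built-in test client can call the route directly. A minimal sketch, assuming this file is importable as main and exposes the app object shown in the diff (note that importing it runs data_ingestion_from_directory() at import time):

from main import app  # assumed module name for this file

client = app.test_client()

# Missing "message": the route short-circuits with "Please say something!"
print(client.post("/chat", json={}).get_json())

# Normal call; "language" must match an indian_languages key for translation.
reply = client.post("/chat", json={"message": "Book a room", "language": "telugu"})
print(reply.get_json()["response"])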