rcwaterman committed b1aade6
1 Parent(s): 0a8a77f

Updating app
app.py CHANGED
@@ -224,9 +224,6 @@ workflow.add_conditional_edges(
     }
 )
 
-workflow.add_edge("use tool", "agent")
-app = workflow.compile()
-
 @cl.author_rename
 def rename(original_author: str):
     """
@@ -241,30 +238,10 @@ def rename(original_author: str):
 
 @cl.on_chat_start
 async def start_chat():
-    """
-
-
-    We will build our LCEL RAG chain here, and store it in the user session.
-
-
-    """
-
-    #-----DEFINE THE AGENT INVOCATION-----#
-    rag_agent = (
-        # INVOKE CHAIN WITH: {"question" : "<<SOME USER QUESTION>>"}
-        # "question" : populated by getting the value of the "question" key
-        # "context" : populated by getting the value of the "question" key and chaining it into the base_retriever
-        {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
-        # "context" : is assigned to a RunnablePassthrough object (will not be called or considered in the next step)
-        #             by getting the value of the "context" key from the previous step
-        | RunnablePassthrough.assign(context=itemgetter("context"))
-        # "response" : the "context" and "question" values are used to format our prompt object and then piped
-        #              into the LLM and stored in a key called "response"
-        # "context" : populated by getting the value of the "context" key from the previous step
-        | {"response": prompt | primary_qa_llm, "context": itemgetter("context")}
-    )
-
-    cl.user_session.set("agent", agent)
+    workflow.add_edge("use tool", "agent")
+    app = workflow.compile()
+
+    cl.user_session.set("agent", app)
 
 @cl.on_message
 async def main(message: cl.Message):
@@ -275,15 +252,13 @@ async def main(message: cl.Message):
 
     The LCEL RAG chain is stored in the user session, and is unique to each user session - this is why we can access it here.
     """
-
+    agent = cl.user_session.get("agent")
 
     msg = cl.Message(content="")
 
-
-
-    messages = app.invoke({"messages" : [HumanMessage(content=message)]})
+    messages = agent.invoke({"messages" : [HumanMessage(content=message)]})
 
-    async for chunk in
+    async for chunk in agent.astream(
         {"query": message.content},
         config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
     ):
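For reference, here is a minimal sketch of the pattern this commit moves to: compile the LangGraph workflow inside @cl.on_chat_start, store the compiled app in the Chainlit user session, and stream from it in @cl.on_message. The single echo node below is a placeholder standing in for the Space's agent and tool nodes, so names like echo and the streaming loop are illustrative assumptions, not the Space's actual code.

# Minimal sketch (assumed placeholder graph, not the Space's real agent).
import chainlit as cl
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.runnables import RunnableConfig
from langgraph.graph import END, START, MessagesState, StateGraph


def echo(state: MessagesState) -> dict:
    # Placeholder node: a real app would call the LLM and tools here.
    last = state["messages"][-1].content
    return {"messages": [AIMessage(content=f"You said: {last}")]}


@cl.on_chat_start
async def start_chat():
    # Build and compile the graph when the chat starts, then keep the
    # compiled app in the per-user session.
    workflow = StateGraph(MessagesState)
    workflow.add_node("agent", echo)
    workflow.add_edge(START, "agent")
    workflow.add_edge("agent", END)

    app = workflow.compile()
    cl.user_session.set("agent", app)


@cl.on_message
async def main(message: cl.Message):
    # Fetch this session's compiled graph and stream its updates back.
    agent = cl.user_session.get("agent")
    msg = cl.Message(content="")

    async for chunk in agent.astream(
        {"messages": [HumanMessage(content=message.content)]},
        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
    ):
        # Each chunk maps node name -> state update produced by that node.
        for node_output in chunk.values():
            for m in node_output.get("messages", []):
                await msg.stream_token(m.content)

    await msg.send()

Compiling inside on_chat_start gives each session its own compiled graph, which is what allows on_message to retrieve it again with cl.user_session.get("agent").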