Update app.py
app.py
CHANGED
@@ -1,137 +1,18 @@
-from langchain.agents import AgentExecutor, AgentType, initialize_agent
-from langchain.agents.structured_chat.prompt import SUFFIX
-from langchain.chat_models import ChatOpenAI
-from langchain.memory import ConversationBufferMemory
-from tools import generate_image_tool
-
 import chainlit as cl
-from
-from chainlit.input_widget import Select, Switch, Slider
-
-
-@cl.author_rename
-def rename(orig_author):
-    """
-    Rename the author of messages as displayed in the "Thinking" section.
-
-    This is useful to make the chat look more natural, or add some fun to it!
-    """
-    mapping = {
-        "AgentExecutor": "The LLM Brain",
-        "LLMChain": "The Assistant",
-        "GenerateImage": "DALL-E 3",
-        "ChatOpenAI": "GPT-4 Turbo",
-        "Chatbot": "Coolest App",
-    }
-    return mapping.get(orig_author, orig_author)
-
-
-@cl.cache
-def get_memory():
-    """
-    This is used to track the conversation history and allow our agent to
-    remember what was said before.
-    """
-    return ConversationBufferMemory(memory_key="chat_history")
+from extract_app import extract_information
 
 
 @cl.on_chat_start
 async def start():
     """
     This is called when the Chainlit chat is started!
-
-    We can add some settings to our application to allow users to select the appropriate model, and more!
     """
-    settings = await cl.ChatSettings(
-        [
-            Select(
-                id="Model",
-                label="OpenAI - Model",
-                values=["gpt-3.5-turbo", "gpt-4-1106-preview"],
-                initial_index=1,
-            ),
-            Switch(id="Streaming", label="OpenAI - Stream Tokens", initial=True),
-            Slider(
-                id="Temperature",
-                label="OpenAI - Temperature",
-                initial=0,
-                min=0,
-                max=2,
-                step=0.1,
-            ),
-        ]
-    ).send()
-    await setup_agent(settings)
-
-
-@cl.on_settings_update
-async def setup_agent(settings):
-    print("Setup agent with following settings: ", settings)
-
-    # We set up our agent with the user selected (or default) settings here.
-    llm = ChatOpenAI(
-        temperature=settings["Temperature"],
-        streaming=settings["Streaming"],
-        model=settings["Model"],
-    )
-
-    # We get our memory here, which is used to track the conversation history.
-    memory = get_memory()
-
-    # This suffix is used to provide the chat history to the prompt.
-    _SUFFIX = "Chat history:\n{chat_history}\n\n" + SUFFIX
-
-    # We initialize our agent here, which is simply being used to decide between responding with text
-    # or an image
-    agent = initialize_agent(
-        llm=llm,  # our LLM (default is GPT-4 Turbo)
-        tools=[
-            generate_image_tool
-        ],  # our custom tool used to generate images with DALL-E 3
-        agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,  # the agent type we're using today
-        memory=memory,  # our memory!
-        agent_kwargs={
-            "suffix": _SUFFIX,  # adding our chat history suffix
-            "input_variables": ["input", "agent_scratchpad", "chat_history"],
-        },
-    )
-    cl.user_session.set("agent", agent)  # storing our agent in the user session
-
+    await cl.Message(content="Welcome to the information extraction chat!").send()
 
 @cl.on_message
 async def main(message: cl.Message):
     """
-    This is called every time a message is received! We use the message to
-    move through our agent flow to generate a response.
-
-    There are ultimately two different options for the agent to respond with:
-    1. Text
-    2. Image
-
-    If the agent responds with text, we simply send the text back to the user.
-
-    If the agent responds with an image, we need to generate the image and send
-    it back to the user.
+    This is called when a message is received!
     """
-
-    agent = cl.user_session.get("agent")  # retrieve the agent stored in the user session
-
-    res = await cl.make_async(agent.run)(
-        input=message.content, callbacks=[cl.LangchainCallbackHandler()]
-    )
-
-    elements = []
-    actions = []
-
-    generated_image_name = cl.user_session.get("generated_image")
-    generated_image = cl.user_session.get(generated_image_name)
-    if generated_image:
-        elements = [
-            cl.Image(
-                content=generated_image,
-                name=generated_image_name,
-                display="inline",
-            )
-        ]
-
-    await cl.Message(content=res, elements=elements, actions=actions).send()
+    res = extract_information().invoke(input=message.content)
+    await cl.Message(content=res).send()
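
Note on the new dependency: the updated app.py imports extract_information from extract_app, a module that is not part of this change. As a minimal sketch of the interface app.py appears to expect, assuming the factory returns a LangChain runnable so that extract_information().invoke(input=message.content) works as called above, it could look like the following. The Person schema, prompt wording, and model name here are illustrative assumptions, not code from this Space.

# extract_app.py -- hypothetical sketch, not part of this commit.
# Assumes langchain-core, langchain-openai, and pydantic are installed
# and OPENAI_API_KEY is available to the Space.
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field


class Person(BaseModel):
    """Illustrative schema for the fields we want to extract."""

    name: str = Field(description="The person's name")
    role: str = Field(description="The person's role or job title")


def extract_information():
    """Return a runnable that pulls a Person out of free-form text.

    app.py calls extract_information().invoke(input=message.content), so the
    return value only needs to expose LangChain's .invoke() interface.
    """
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "Extract the requested fields from the user's message."),
            ("human", "{input}"),
        ]
    )
    llm = ChatOpenAI(model="gpt-4-1106-preview", temperature=0)
    return (
        # Accept a plain string, since app.py passes message.content directly.
        RunnableLambda(lambda text: {"input": text})
        | prompt
        | llm.with_structured_output(Person)
        # Serialize so the result can be dropped into cl.Message(content=...).
        | RunnableLambda(lambda person: person.model_dump_json(indent=2))
    )

Under this sketch, extract_information().invoke(input="Ada Lovelace wrote the first program") returns a small JSON string, which is what the new on_message handler sends back to the user.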