bhys committed
Commit b628e9d · 1 parent: 0cdc5b8
GradioUI.py ADDED
@@ -0,0 +1,489 @@
+ import gradio as gr
+ import json
+ 
+ import autogen
+ from autogen import AssistantAgent
+ 
+ from autogensop.gradio_autogen_sop import GradioAutogenSop
+ from autogensop.coor_retrieve_agent import CoorRetrieveGoodsAgent
+ 
+ llm_config = [{
+     "model": "gpt-4-1106-preview",
+     "api_key": "输入您自己的<api_key>",
+     "base_url": "输入您自己的<base_url>"
+ }]
+ llm_dict = {item['model']: item for item in llm_config}
+ 
+ 
+ def build_llm_config(value):
+     global llm_config
+     llm_config = json.loads(value)
+     print(llm_config)
+     global llm_dict
+     llm_dict = {item['model']: item for item in llm_config}
+     return llm_config
+ 
+ 
+ llm_config_interface = gr.Interface(
+     build_llm_config,
+     inputs=gr.TextArea(
+         label="编辑LLM配置",
+         value=json.dumps(llm_config, indent=2),
+     ),
+     clear_btn=gr.Button(visible=False),
+     submit_btn="保存",
+     outputs=gr.JSON(value=llm_config, label="已保存的LLM配置"),
+     allow_flagging="never"
+ )
+ 
+ with gr.Blocks() as agent_config_interface:
+     agent_types = {
+         "AssistantAgent": AssistantAgent,
+         "CoorRetrieveGoodsAgent": CoorRetrieveGoodsAgent
+     }
+     user = autogen.UserProxyAgent(
+         name="User",
+         code_execution_config=False,
+         human_input_mode="ALWAYS"
+     )
+     salesperson = AssistantAgent(
+         name="Salesperson",
+         system_message=f"You are a helpful AI salesperson that remembers useful info from prior chats. If User proposed to buy something but Warehouse didn't mention that thing, you can't be the next role. ",
+         llm_config={"config_list": llm_config},
+     )
+     # TODO: initialize the salesperson's memory from the QAs
+     warehouse = CoorRetrieveGoodsAgent(
+         name="Warehouse",
+         system_message=f"If the previous role was User and he proposed to buy something, you can return the goods. ",
+         llm_config={"config_list": llm_config},
+     )
+     # salesman = AssistantAgent(
+     #     name="salesman",
+     #     system_message=f"You are a helpful assistant,你能根据自身知识,给出真实世界的一些商品详细信息。",
+     #     llm_config={"config_list": llm_config},
+     # )
+     assistant = AssistantAgent(
+         name="assistant",
+         system_message=f"You are a helpful assistant,你能根据用户需求给出解决方案。 ",
+         llm_config={"config_list": llm_config},
+     )
+     agents = [user, salesperson, warehouse, assistant]
+     edit_agent_index = 0
+ 
+     with gr.Row():
+         agent_list = gr.Gallery(
+             [("./assets/robot.png", item.name) for item in agents] + [("./assets/add.jpg", "Add new agent")],
+             columns=4,
+             allow_preview=False
+         )
+         with gr.Column():
+             agent_type = gr.Dropdown(agent_types.keys(), label="Agent类型", visible=False)
+             agent_name = gr.Textbox(placeholder="填写agent姓名", label="名称", visible=False)
+             agent_sys_msg = gr.Textbox(placeholder="填写agent的system message", label="system message", visible=False)
+             agent_llm = gr.Dropdown([item["model"] for item in llm_config], label="使用的LLM", visible=False)
+             agent_edit_btn = gr.Button("修改", visible=False)
+             agent_del_btn = gr.Button("删除", visible=False)
+             agent_add_btn = gr.Button("增加此Agent", visible=False)
+ 
+ 
+     def agentChange(evt: gr.SelectData):
+         index = evt.index
+         global edit_agent_index
+         edit_agent_index = index
+         if (index > len(agents) - 1):
+             agent_type = gr.Dropdown(agent_types.keys(), label="Agent类型", value=list(agent_types.keys())[0],
+                                      visible=True)
+             agent_name = gr.Textbox(placeholder="填写agent姓名", label="名称", visible=False)
+             agent_sys_msg = gr.Textbox(placeholder="填写agent的system message", label="system message", visible=False)
+             agent_llm = gr.Dropdown([item["model"] for item in llm_config], label="使用的LLM", visible=False)
+             agent_edit_btn = gr.Button("修改", visible=False)
+             agent_del_btn = gr.Button("删除", visible=False)
+             agent_add_btn = gr.Button("增加此Agent", visible=True)
+         elif (index == 0):
+             agent_type = gr.Dropdown(["UserProxyAgent"], value="UserProxyAgent", label="Agent类型", visible=True)
+             agent_name = gr.Textbox(placeholder="填写agent姓名", label="名称", visible=False)
+             agent_sys_msg = gr.Textbox(placeholder="填写agent的system message", label="system message", visible=False)
+             agent_llm = gr.Dropdown([item["model"] for item in llm_config], label="使用的LLM", visible=False)
+             agent_edit_btn = gr.Button("修改", visible=False)
+             agent_del_btn = gr.Button("删除", visible=False)
+             agent_add_btn = gr.Button("增加此Agent", visible=False)
+         else:
+             agent_name = gr.Textbox(placeholder="填写agent姓名", label="名称", value=agents[index].name, visible=True)
+             agent_sys_msg = gr.Textbox(placeholder="填写agent的system message", label="system message",
+                                        value=agents[index].system_message, visible=True)
+             agent_type = gr.Dropdown(
+                 agent_types.keys(),
+                 label="Agent类型",
+                 value=agents[index].__class__.__name__,
+                 visible=True
+             )
+             agent_llm = gr.Dropdown(
+                 [item["model"] for item in llm_config],
+                 label="使用的LLM",
+                 value=agents[index].llm_config["config_list"][0]["model"],
+                 visible=True
+             )
+             agent_edit_btn = gr.Button("修改", visible=True)
+             agent_del_btn = gr.Button("删除", visible=True)
+             agent_add_btn = gr.Button("增加此Agent", visible=False)
+         return agent_type, agent_name, agent_sys_msg, agent_llm, agent_edit_btn, agent_del_btn, agent_add_btn
+ 
+ 
+     def editBtn(type, name, sys_msg, llm):
+         agents[edit_agent_index] = agent_types[type](
+             name=name,
+             system_message=sys_msg,
+             llm_config={"config_list": [llm_dict[llm]]}
+         )
+         agent_list = gr.Gallery(
+             [("./assets/robot.png", item.name) for item in agents] + [("./assets/add.jpg", "Add new agent")],
+             columns=4,
+             allow_preview=False
+         )
+         return agent_list
+ 
+ 
+     def delBtn():
+         del agents[edit_agent_index]
+         agent_list = gr.Gallery(
+             [("./assets/robot.png", item.name) for item in agents] + [("./assets/add.jpg", "Add new agent")],
+             columns=4,
+             allow_preview=False
+         )
+         agent_edit_btn = gr.Button("修改", visible=False)
+         agent_del_btn = gr.Button("删除", visible=False)
+         agent_add_btn = gr.Button("增加此Agent", visible=False)
+         return agent_list, agent_edit_btn, agent_del_btn, agent_add_btn
+ 
+ 
+     def addBtn(type):
+         index = edit_agent_index
+         newAgent = agent_types[type](
+             name="agent" + str(index + 1),
+             system_message="You are a helpful assistant",
+             llm_config={"config_list": [llm_config[0]]}
+         )
+         agents.append(newAgent)
+         agent_list = gr.Gallery(
+             [("./assets/robot.png", item.name) for item in agents] + [("./assets/add.jpg", "Add new agent")],
+             columns=4,
+             allow_preview=False
+         )
+         agent_name = gr.Textbox(placeholder="填写agent姓名", label="名称", value=agents[index].name, visible=True)
+         agent_sys_msg = gr.Textbox(placeholder="填写agent的system message", label="system message",
+                                    value=agents[index].system_message, visible=True)
+         agent_type = gr.Dropdown(
+             agent_types.keys(),
+             label="Agent类型",
+             value=agents[index].__class__.__name__,
+             visible=True
+         )
+         agent_llm = gr.Dropdown(
+             [item["model"] for item in llm_config],
+             label="使用的LLM",
+             value=agents[index].llm_config["config_list"][0]["model"],
+             visible=True
+         )
+         agent_edit_btn = gr.Button("修改", visible=True)
+         agent_del_btn = gr.Button("删除", visible=True)
+         agent_add_btn = gr.Button("增加此Agent", visible=False)
+         return agent_list, agent_type, agent_name, agent_sys_msg, agent_llm, agent_edit_btn, agent_del_btn, agent_add_btn
+ 
+ 
+     agent_list.select(
+         agentChange,
+         None,
+         [agent_type, agent_name, agent_sys_msg, agent_llm, agent_edit_btn, agent_del_btn, agent_add_btn]
+     )
+     agent_edit_btn.click(
+         editBtn,
+         [agent_type, agent_name, agent_sys_msg, agent_llm],
+         agent_list
+     )
+     agent_del_btn.click(delBtn, None, [agent_list, agent_edit_btn, agent_del_btn, agent_add_btn])
+     agent_add_btn.click(
+         addBtn,
+         agent_type,
+         [agent_list, agent_type, agent_name, agent_sys_msg, agent_llm, agent_edit_btn, agent_del_btn, agent_add_btn]
+     )
+ 
+ with gr.Blocks() as sop_config_interface:
+     target = "Salesperson声明下单成功且买家没有放弃购买,或者买家已经阐述了为何不购买的原因。"
+     states = {
+         "初步介绍": {
+             "start_condition": """买家首次咨询且没有明确提问时。""",
+             "participate_agent_names": ["Salesperson", "User"],
+             "sys_msg": {
+                 "User": "",
+                 "Salesperson": "引导用户购买百货商品"},
+             "examples": [],
+         },
+         "确认需求": {
+             "start_condition": """1. 用户提供更详细的需求信息,如具体规格、功能要求等;2. 用户描述他们的问题或挑战,并寻求解决方案;3. 用户表达他们的目标和期望,以及对产品或服务的期望效果;4. 用户咨询产品细节""",
+             "participate_agent_names": ["User", "Warehouse", "Salesperson"],
+             "sys_msg": {
+                 "User": "",
+                 "Salesperson": "了解客户对产品的要求和应用方式,按照客户需求推荐合适产品;",
+                 "Warehouse": "用户需要挑选商品时,你将所知道的商品信息全部返回", },
+             "examples": [],
+         },
+         "完成订单": {
+             "start_condition": """用户表示购买某个商品。""",
+             "participate_agent_names": ["User", "Warehouse", "Salesperson"],
+             "sys_msg": {
+                 "User": "",
+                 "Salesperson": "仅回复:下单成功;",
+                 "Warehouse": "", },
+             "examples": [],
+         },
+         "反思": {
+             "start_condition": """用户表示不购买或放弃购买。""",
+             "participate_agent_names": ["User", "Salesperson"],
+             "sys_msg": {
+                 "User": "",
+                 "Salesperson": "复盘沟通过程,反思输单原因,必要时询问用户为什么不选择自己;"},
+             "examples": [],
+         },
+     }
+     # states = {'明确需求': {'start_condition': '用户没有想要购买某件具体的商品。',
+     #                    'participate_agent_names': ['User', 'assistant'],
+     #                    'sys_msg': {'User': '', 'assistant': '给出多条建议,帮助用户确定自己的需求。'},
+     #                    'examples': []}, '议价': {'start_condition': '用户想要购买某件具体的商品。',
+     #                                          'participate_agent_names': ['User', 'salesman'],
+     #                                          'sys_msg': {'User': '',
+     #                                                      'salesman': '在不能亏本的前提下成交,并尽可能赚取更多的钱。'},
+     #                                          'examples': []}}
+     sop_llm_set = llm_dict["gpt-4-1106-preview"]
+     edit_state_name = ""
+ 
+     sop_target = gr.Textbox(label="结束条件", info="每当用户输入完毕,判断结束条件是否达成,达成则整个SOP流程结束",
+                             value=target)
+     sop_llm = gr.Dropdown(
+         [item["model"] for item in llm_config],
+         label="流程控制LLM",
+         info="用此LLM判断SOP所处环节和是否结束",
+         value=llm_config[0]["model"]
+     )
+     with gr.Row():
+         state_list = gr.Gallery(
+             [("./assets/state.png", key) for key, item in states.items()] + [("./assets/add.jpg", "Add new state")],
+             columns=4,
+             allow_preview=False
+         )
+         with gr.Column():
+             sop_name = gr.Textbox(placeholder="填写环节名称", label="环节名称", visible=False)
+             sop_start_condition = gr.Textbox(placeholder="填写进入此业务环节的条件", label="进入条件", visible=False)
+             sop_participate_agent_names = gr.Dropdown([agent.name for agent in agents], label="参与此环节的agents",
+                                                       multiselect=True, visible=False)
+             sop_sys_msg = gr.TextArea(label="agent任务", info="各agent在此环节的任务,无特殊任务的agent可留白不填写",
+                                       visible=False)
+             sop_edit_btn = gr.Button("修改", visible=False)
+             sop_del_btn = gr.Button("删除", visible=False)
+             sop_add_btn = gr.Button("增加此Agent", visible=False)
+ 
+ 
+     def stateChange(evt: gr.SelectData):
+         index = evt.index
+         if (index > len(states.keys()) - 1):
+             sop_name = gr.Textbox(placeholder="填写环节名称", label="环节名称", value="", visible=True)
+             sop_start_condition = gr.Textbox(placeholder="填写进入此业务环节的条件", label="进入条件", visible=False)
+             sop_participate_agent_names = gr.Dropdown([agent.name for agent in agents], label="参与此环节的agents",
+                                                       multiselect=True, visible=False)
+             sop_sys_msg = gr.TextArea(label="agent任务", info="各agent在此环节的任务,无特殊任务的agent可留白不填写",
+                                       visible=False)
+             sop_edit_btn = gr.Button("修改", visible=False)
+             sop_del_btn = gr.Button("删除", visible=False)
+             sop_add_btn = gr.Button("增加此环节", visible=True)
+         else:
+             name = list(states.keys())[index]
+             global edit_state_name
+             edit_state_name = name
+             sop_name = gr.Textbox(placeholder="填写环节名称", label="环节名称", value=edit_state_name, visible=True)
+             sop_start_condition = gr.Textbox(placeholder="填写进入此业务环节的条件", label="进入条件",
+                                              value=states[edit_state_name]["start_condition"], visible=True)
+             sop_participate_agent_names = gr.Dropdown([agent.name for agent in agents], label="参与此环节的agents",
+                                                       value=states[edit_state_name]["participate_agent_names"],
+                                                       multiselect=True, visible=True)
+             sop_sys_msg = gr.TextArea(label="agent任务", info="各agent在此环节的任务,无特殊任务的agent可留白不填写",
+                                       value=json.dumps(states[edit_state_name]["sys_msg"], indent=2,
+                                                        ensure_ascii=False),
+                                       visible=True)
+             sop_edit_btn = gr.Button("修改", visible=True)
+             sop_del_btn = gr.Button("删除", visible=True)
+             sop_add_btn = gr.Button("增加此环节", visible=False)
+         return sop_name, sop_start_condition, sop_participate_agent_names, sop_sys_msg, sop_edit_btn, sop_del_btn, sop_add_btn
+ 
+ 
+     def participateAgentChange(names):
+         new_sys_msg = {}
+         for name in names:
+             if (name in states[edit_state_name]["sys_msg"].keys()):
+                 new_sys_msg[name] = states[edit_state_name]["sys_msg"][name]
+             else:
+                 new_sys_msg[name] = ""
+         sop_sys_msg = gr.TextArea(label="agent任务", info="各agent在此环节的任务,无特殊任务的agent可留白不填写",
+                                   value=json.dumps(new_sys_msg, indent=2, ensure_ascii=False),
+                                   visible=True)
+         return sop_sys_msg
+ 
+ 
+     def editSopBtn(sop_name, sop_start_condition, sop_participate_agent_names, sop_sys_msg):
+         new_state = {}
+         global states
+         for key, item in states.items():
+             if edit_state_name == key:
+                 new_state[sop_name] = {
+                     "start_condition": sop_start_condition,
+                     "participate_agent_names": sop_participate_agent_names,
+                     "sys_msg": json.loads(sop_sys_msg),
+                     "examples": [],
+                 }
+             else:
+                 new_state[key] = states[key]
+         states = new_state
+         state_list = gr.Gallery(
+             [("./assets/state.png", key) for key, item in states.items()] + [("./assets/add.jpg", "Add new state")],
+             columns=4,
+             allow_preview=False
+         )
+         return state_list
+ 
+ 
+     def delSopBtn():
+         # TODO: deleting several states in a row still triggers a bug
+         del states[edit_state_name]
+         state_list = gr.Gallery(
+             [("./assets/state.png", key) for key, item in states.items()] + [("./assets/add.jpg", "Add new state")],
+             columns=4,
+             allow_preview=False
+         )
+         sop_edit_btn = gr.Button("修改", visible=False)
+         sop_del_btn = gr.Button("删除", visible=False)
+         sop_add_btn = gr.Button("增加此Agent", visible=False)
+         return state_list, sop_edit_btn, sop_del_btn, sop_add_btn
+ 
+ 
+     def addSopBtn(name):
+         states[name] = {
+             "start_condition": "",
+             "participate_agent_names": [],
+             "sys_msg": {},
+             "examples": [],
+         }
+         state_list = gr.Gallery(
+             [("./assets/state.png", key) for key, item in states.items()] + [("./assets/add.jpg", "Add new state")],
+             columns=4,
+             allow_preview=False
+         )
+         sop_start_condition = gr.Textbox(placeholder="填写进入此业务环节的条件", label="进入条件", visible=True)
+         sop_participate_agent_names = gr.Dropdown([agent.name for agent in agents], label="参与此环节的agents",
+                                                   multiselect=True, visible=True)
+         sop_sys_msg = gr.TextArea(label="agent任务", info="各agent在此环节的任务,无特殊任务的agent可留白不填写",
+                                   visible=True)
+         sop_edit_btn = gr.Button("修改", visible=True)
+         sop_del_btn = gr.Button("删除", visible=True)
+         sop_add_btn = gr.Button("增加此Agent", visible=False)
+         return state_list, sop_start_condition, sop_participate_agent_names, sop_sys_msg, sop_edit_btn, sop_del_btn, sop_add_btn
+ 
+ 
+     def targetInput(value):
+         global target
+         target = value
+ 
+ 
+     def sopLlmSelect(llm):
+         print(llm)
+         global sop_llm_set
+         sop_llm_set = llm_dict[llm]
+ 
+ 
+     state_list.select(
+         stateChange,
+         None,
+         [sop_name, sop_start_condition, sop_participate_agent_names, sop_sys_msg, sop_edit_btn, sop_del_btn,
+          sop_add_btn]
+     )
+     sop_target.input(targetInput, sop_target)
+     sop_llm.select(sopLlmSelect, sop_llm)
+     sop_participate_agent_names.change(participateAgentChange, sop_participate_agent_names, sop_sys_msg)
+     sop_edit_btn.click(editSopBtn, [sop_name, sop_start_condition, sop_participate_agent_names, sop_sys_msg],
+                        state_list)
+     sop_del_btn.click(delSopBtn, None, [state_list, sop_edit_btn, sop_del_btn, sop_add_btn])
+     sop_add_btn.click(addSopBtn, sop_name,
+                       [state_list, sop_start_condition, sop_participate_agent_names, sop_sys_msg, sop_edit_btn,
+                        sop_del_btn, sop_add_btn])
+     # gr.JSON(label="环节配置", value=states)
+ 
+ with gr.Blocks() as chat_interface:
+     start_btn = gr.Button('检查配置并启动SOP', visible=True)
+     chatbot = gr.Chatbot(visible=False)
+     msg = gr.Textbox(visible=False)
+     stop_btn = gr.Button("手动结束SOP", visible=False)
+     SOP = GradioAutogenSop(target=target, states=states, agents=agents, user_name=user.name,
+                            llm_config={"config_list": [sop_llm_set]})
+ 
+ 
+     def start():
+         new_agents = []
+         for agent in agents:
+             if agent.__class__.__name__ == 'UserProxyAgent':
+                 new_agents.append(agent)
+                 print(agent.__dict__)
+             else:
+                 new_agent = agent_types[agent.__class__.__name__](
+                     name=agent.name,
+                     system_message=agent.system_message,
+                     llm_config={"config_list": [llm_dict[agent.llm_config["config_list"][0]["model"]]]}
+                 )
+                 del agent
+                 print(new_agent.__dict__)
+                 new_agents.append(new_agent)
+ 
+         global SOP
+         SOP = GradioAutogenSop(target=target, states=states, agents=new_agents, user_name=user.name,
+                                llm_config={"config_list": [llm_dict[sop_llm_set['model']]]})
+         print(SOP.__dict__)
+         config_flag = True
+         start_btn = gr.Button('检查配置并启动SOP', visible=not config_flag)
+         chatbot = gr.Chatbot(visible=config_flag)
+         msg = gr.Textbox(visible=config_flag)
+         stop_btn = gr.Button("手动结束SOP", visible=config_flag)
+         return start_btn, chatbot, msg, stop_btn
+ 
+ 
+     def stop():
+         global SOP
+         del SOP
+         config_flag = False
+         start_btn = gr.Button('检查配置并启动SOP', visible=not config_flag)
+         chatbot = gr.Chatbot(visible=config_flag, value=[])
+         msg = gr.Textbox(visible=config_flag)
+         stop_btn = gr.Button("手动结束SOP", visible=config_flag)
+         return start_btn, chatbot, msg, stop_btn
+ 
+ 
+     def readyResponse(msg, chatbot):
+         global SOP
+         return SOP.ready_response(msg, chatbot)
+ 
+ 
+     def getResponse(chatbot):
+         global SOP
+         f = SOP.get_response(chatbot)
+         for item in f:
+             yield item
+ 
+ 
+     msg.submit(readyResponse, [msg, chatbot], [msg, chatbot]).then(getResponse, chatbot, chatbot)
+     start_btn.click(start, None, [start_btn, chatbot, msg, stop_btn])
+     stop_btn.click(stop, None, [start_btn, chatbot, msg, stop_btn])
+ 
+ with gr.Blocks() as app:
+     gr.Markdown("### 西北工业大学·高德宏&杨黎斌&胥基")
+     gr.TabbedInterface(
+         [llm_config_interface, agent_config_interface, sop_config_interface, chat_interface],
+         ["配置LLM", "配置Agents", "配置SOP", "开始聊天"],
+     )
+ 
+ if __name__ == "__main__":
+     app.launch()
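
The LLM tab expects the textarea to contain a JSON list with the same shape as llm_config above; build_llm_config simply json.loads it and rebuilds llm_dict. A minimal sketch of a valid value (the key and base URL below are placeholders for illustration, not values from this repo):

import json

llm_config = [
    {
        "model": "gpt-4-1106-preview",
        "api_key": "sk-...",                      # your own key (placeholder)
        "base_url": "https://api.openai.com/v1"   # your own endpoint, assumed OpenAI-compatible
    }
]

# GradioUI.py derives the same model-name index after "保存" is clicked.
llm_dict = {item["model"]: item for item in llm_config}
print(json.dumps(llm_config, indent=2))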
assets/add.jpg ADDED
assets/robot.png ADDED
assets/state.png ADDED
autogensop/autogensop.py ADDED
@@ -0,0 +1,136 @@
+ import autogen
+ from autogen.agentchat.contrib.text_analyzer_agent import TextAnalyzerAgent
+ 
+ from autogensop.chatmamager import GroupChat, GroupChatManager
+ 
+ 
+ # Because the user may have their own conversational rhythm, waiting for a state's task to finish
+ # before leaving the state loop cannot adapt to that rhythm. Instead, the state is re-evaluated
+ # after every user input.
+ # The flow is driven by the user's input: different inputs change the direction the process takes.
+ # For each state task a GroupChat is built; it contains one ChatManager, several agents, and one User.
+ # ChatManager: decides which agent to talk to (the user intervenes according to the preset mode).
+ # agent: only answers the ChatManager's questions.
+ # User: intervenes according to the preset mode.
+ 
+ 
+ # Conversation flow:
+ # 1. The ChatManager collects the capabilities of every agent except the User.
+ # 2. The ChatManager introduces its combined capabilities to the user and asks for their needs.
+ # 3. The User gives the ChatManager their input.
+ # 4. Check whether the target is satisfied; if so, stop, otherwise go to step 5.
+ # 5. The ChatManager decides which state the conversation is in.
+ # 6. Based on the target, task, input, and (optionally) history, the ChatManager decides whether to
+ #    talk to the User or to an agent, until it is the User's turn to speak; then go back to step 3.
+ 
+ class AutogenSop(autogen.ConversableAgent):
+     """
+     SOP flow control built on autogen agents.
+     """
+ 
+     # Pass in a config_path and initialize from the file configuration.
+     def __init__(
+         self,
+         target,
+         states,
+         agents,
+         llm_config,
+         max_user_input=100,
+         **kwargs,
+     ) -> None:
+         super().__init__(
+             name="State manager",
+             human_input_mode="NEVER",
+             llm_config=llm_config,
+             system_message="You are a state manager",
+             **kwargs,
+         )
+         self.target = target
+         self.states = states
+         self.agents = agents
+         self.llm_config = llm_config
+         self.max_user_input = max_user_input
+         self.groupchat = autogen.GroupChat(agents, messages=[])
+         self.messages = []
+ 
+     def _state_start_condition(self):
+         return "\n".join([f"{key}: {item['start_condition']}" for key, item in self.states.items()])
+ 
+     def _state_task(self):
+         return "\n".join([f"{key}: {item['task']}" for key, item in self.states.items()])
+ 
+     def judge_target_reached(self):
+         # Decide whether self.target has been reached
+         rule = f"""Your judgment condition is {self.target} If the judgment condition is reached, only return: EXIT, else only return: CONTINUE. """
+         prompt = self.messages + [{
+             "role": "system",
+             "content": rule,
+         }]
+         final, res = self.generate_oai_reply(prompt)
+         print('***************target_reached:' + res)
+         if final:
+             if 'EXIT' in res:
+                 return True
+             else:
+                 return False
+         else:
+             print('judge_target_reached 错误')
+             exit()
+ 
+     def select_state(self):
+         # Decide which state to enter
+         states_rule = f"""Your ultimate goal is {self.target} The optional states are as follows:
+ {self._state_start_condition()}.
+ Read the above conversation. Then select the next state from {[key for key in self.states]}. Only return the state."""
+         prompt = self.messages[-4:] + [{
+             "role": "system",
+             "content": states_rule,
+         }]
+         print('【判断进入的state】')
+         final, name = self.generate_oai_reply(prompt)
+         print('************进入:' + name + '阶段************\n')
+         if final:
+             return name
+         else:
+             print('select_state 错误')
+             exit()
+ 
+     def init_sop(self, user_name):
+         user = self.groupchat.agent_by_name(user_name)
+         self.stop_reply_at_receive(user)
+         self.send(message="请问有什么需要帮助吗?", recipient=user, request_reply=True)
+         last_msg = self.last_message()
+         last_msg['name'] = user_name
+         self.messages.append(last_msg)
+         self.max_user_input -= 1
+ 
+         manager = GroupChatManager(self.groupchat, user_name=user_name, llm_config=self.llm_config)
+         while not self.judge_target_reached() and self.max_user_input > 0:
+             # Who spoke last, and what did they say
+             last_speaker = self.groupchat.agent_by_name(self.messages[-1]['name'])
+             last_message = self.messages[-1]['content']
+ 
+             # Decide the current state
+             if last_speaker.name == user_name:
+                 state_name = self.select_state()
+ 
+             # Update the participants' sys_msg for this state by appending the stage goal
+             for agent_name, sys_msg in self.states[state_name]['sys_msg'].items():
+                 agent = self.groupchat.agent_by_name(agent_name)
+                 if sys_msg not in agent.system_message:
+                     agent.update_system_message(agent.system_message + '此阶段你的目标是:' + sys_msg)
+ 
+             # Create a group chat for this state that inherits the existing conversation
+             groupchat = GroupChat(
+                 [self.groupchat.agent_by_name(agent_name) for agent_name in
+                  self.states[state_name]['participate_agent_names']],
+                 messages=self.messages[:-1],
+                 max_round=4
+             )
+             manager.groupchat = groupchat
+             if len(self.messages) <= 1:
+                 last_speaker.initiate_chat(
+                     message=last_message, recipient=manager, clear_history=False
+                 )
+             elif len(self.messages) > 1:
+                 manager.generate_reply(messages=manager.chat_messages[last_speaker], sender=last_speaker)
+             self.messages = groupchat.messages
+ 
+             self.max_user_input -= 1
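
The numbered flow in the comments above is driven entirely by init_sop. A minimal console driver sketch, assuming the same states/agents shape that GradioUI.py builds (the LLM entry and state text here are illustrative, not from this commit):

import autogen
from autogen import AssistantAgent
from autogensop.autogensop import AutogenSop

llm_config = {"config_list": [{"model": "gpt-4-1106-preview", "api_key": "sk-..."}]}  # placeholder key

user = autogen.UserProxyAgent(name="User", code_execution_config=False, human_input_mode="ALWAYS")
salesperson = AssistantAgent(name="Salesperson", system_message="You are a helpful AI salesperson. ",
                             llm_config=llm_config)

states = {
    "初步介绍": {
        "start_condition": "买家首次咨询且没有明确提问时。",
        "participate_agent_names": ["Salesperson", "User"],
        "sys_msg": {"User": "", "Salesperson": "引导用户购买百货商品"},
        "examples": [],
    },
}

sop = AutogenSop(
    target="买家完成下单或明确表示放弃购买。",
    states=states,
    agents=[user, salesperson],
    llm_config=llm_config,
)
sop.init_sop(user_name="User")  # steps 2-6 of the flow run inside this loop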
autogensop/chatmamager.py ADDED
@@ -0,0 +1,219 @@
+ from dataclasses import dataclass
+ import sys
+ from typing import Dict, List, Optional, Union
+ import logging
+ 
+ from autogen import Agent, ConversableAgent
+ 
+ logger = logging.getLogger(__name__)
+ 
+ 
+ @dataclass
+ class GroupChat:
+     """A group chat class that contains the following data fields:
+     - agents: a list of participating agents.
+     - messages: a list of messages in the group chat.
+     - max_round: the maximum number of rounds.
+     - admin_name: the name of the admin agent if there is one. Default is "Admin".
+         KeyBoardInterrupt will make the admin agent take over.
+     - func_call_filter: whether to enforce function call filter. Default is True.
+         When set to True and when a message is a function call suggestion,
+         the next speaker will be chosen from an agent which contains the corresponding function name
+         in its `function_map`.
+     """
+ 
+     agents: List[Agent]
+     messages: List[Dict]
+     max_round: int = 10
+     admin_name: str = "Admin"
+     func_call_filter: bool = True
+ 
+     @property
+     def agent_names(self) -> List[str]:
+         """Return the names of the agents in the group chat."""
+         return [agent.name for agent in self.agents]
+ 
+     def reset(self):
+         """Reset the group chat."""
+         self.messages.clear()
+ 
+     def agent_by_name(self, name: str) -> Agent:
+         """Return the agent with the given name."""
+         return self.agents[self.agent_names.index(name)]
+ 
+     def next_agent(self, agent: Agent, agents: List[Agent]) -> Agent:
+         """Return the next agent in the list."""
+         if agents == self.agents:
+             return agents[(self.agent_names.index(agent.name) + 1) % len(agents)]
+         else:
+             offset = self.agent_names.index(agent.name) + 1
+             for i in range(len(self.agents)):
+                 if self.agents[(offset + i) % len(self.agents)] in agents:
+                     return self.agents[(offset + i) % len(self.agents)]
+ 
+     def select_speaker_msg(self, agents: List[Agent]):
+         """Return the message for selecting the next speaker."""
+         return f"""You are in a role play game. The following roles are available:
+ {self._participant_roles()}.
+ Ignoring the order in which the above roles appear.
+ Think about the dependency relationships between different roles.
+ Read the following conversation.
+ Then select the next role from {[agent.name for agent in agents]} to play. Only return the role."""
+ 
+     def select_speaker(self, last_speaker: Agent, selector: ConversableAgent):
+         """Select the next speaker."""
+         if self.func_call_filter and self.messages and "function_call" in self.messages[-1]:
+             # find agents with the right function_map which contains the function name
+             agents = [
+                 agent for agent in self.agents if agent.can_execute_function(self.messages[-1]["function_call"]["name"])
+             ]
+             if len(agents) == 1:
+                 # only one agent can execute the function
+                 return agents[0]
+             elif not agents:
+                 # find all the agents with function_map
+                 agents = [agent for agent in self.agents if agent.function_map]
+                 if len(agents) == 1:
+                     return agents[0]
+                 elif not agents:
+                     raise ValueError(
+                         f"No agent can execute the function {self.messages[-1]['name']}. "
+                         "Please check the function_map of the agents."
+                     )
+         else:
+             agents = self.agents
+             # Warn if GroupChat is underpopulated
+             n_agents = len(agents)
+             if n_agents < 3:
+                 logger.warning(
+                     f"GroupChat is underpopulated with {n_agents} agents. Direct communication would be more efficient."
+                 )
+         selector.update_system_message(self.select_speaker_msg(agents))
+         final, name = selector.generate_oai_reply(
+             # select the next speaker based on the last five messages
+             self.messages[-5:]
+             + [
+                 {
+                     "role": "system",
+                     "content": f"Read the above conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role.",
+                 }
+             ]
+         )
+         if not final:
+             # i = self._random.randint(0, len(self._agent_names) - 1)  # randomly pick an id
+             return self.next_agent(last_speaker, agents)
+         try:
+             return self.agent_by_name(name)
+         except ValueError:
+             return self.next_agent(last_speaker, agents)
+ 
+     def _participant_roles(self):
+         return "\n".join([f"{agent.name}: {agent.system_message}" for agent in self.agents])
+ 
+ 
+ class GroupChatManager(ConversableAgent):
+     """(In preview) A chat manager agent that can manage a group chat of multiple agents."""
+ 
+     def __init__(
+         self,
+         groupchat: GroupChat,
+         name: Optional[str] = "chat_manager",
+         # unlimited consecutive auto reply by default
+         max_consecutive_auto_reply: Optional[int] = sys.maxsize,
+         human_input_mode: Optional[str] = "NEVER",
+         system_message: Optional[str] = "Group chat manager.",
+         # seed: Optional[int] = 4,
+         task: Optional[str] = "",
+         user_name: Optional[str] = "",
+         **kwargs,
+     ):
+         super().__init__(
+             name=name,
+             max_consecutive_auto_reply=max_consecutive_auto_reply,
+             human_input_mode=human_input_mode,
+             system_message=system_message,
+             **kwargs,
+         )
+         self.groupchat = groupchat
+         self.task = task
+         self.user_name = user_name
+         self.update_system_message(self.groupchat.select_speaker_msg(self.groupchat.agents))
+         self.register_reply(Agent, GroupChatManager.run_chat, config=groupchat)
+         # self._random = random.Random(seed)
+ 
+     # Deprecated: because the user may have their own conversational rhythm, waiting for the task to
+     # finish before leaving the state loop cannot adapt to it; the state is now re-evaluated after
+     # every user input instead.
+     # def judge_task_reached(self):
+     #     # Decide whether the task is complete
+     #     states_rule = f"""The task is '{self.task}'. Read the above conversation. Think about whether the task is complete, only answer yes or no"""
+     #     prompt = self.groupchat.messages + [{
+     #         "role": "system",
+     #         "content": states_rule,
+     #     }]
+     #     print('【判断task是否完成】')
+     #     final, answer = self.generate_oai_reply(prompt)
+     #     if final:
+     #         if answer in ['yes', 'yes.', 'Yes', 'Yes.']:
+     #             print('************task完成************\n')
+     #             return True
+     #         elif answer in ['no', 'no.', 'No', 'No.']:
+     #             print('--------task未完成----------\n')
+     #             return False
+     #         else:
+     #             print('judge_task_reached 未识别')
+     #             print(answer)
+     #             exit()
+     #     else:
+     #         print('judge_task_reached 错误')
+     #         exit()
+ 
+     def run_chat(
+         self,
+         messages: Optional[List[Dict]] = None,
+         sender: Optional[Agent] = None,
+         config: Optional[GroupChat] = None,
+     ) -> Union[str, Dict, None]:
+         """Run a group chat."""
+         if messages is None:
+             messages = self._oai_messages[sender]
+         message = messages[-1]
+         speaker = sender
+         groupchat = self.groupchat
+         for i in range(groupchat.max_round):
+             # set the name to speaker's name if the role is not function
+             if message["role"] != "function":
+                 message["name"] = speaker.name
+             groupchat.messages.append(message)
+             # broadcast the message to all agents except the speaker
+             for agent in groupchat.agents:
+                 if agent != speaker:
+                     self.send(message, agent, request_reply=False, silent=True)
+             if i != 0 and speaker.name == self.user_name:
+                 break
+             if i == groupchat.max_round - 1:
+                 # the last round
+                 break
+             try:
+                 # select the next speaker
+                 print('【选择发言人】')
+                 speaker = groupchat.select_speaker(speaker, self)
+                 print('************选择发言的是:' + speaker.name + '**************\n')
+                 # let the speaker speak
+                 reply = speaker.generate_reply(sender=self)
+             except KeyboardInterrupt:
+                 # let the admin agent speak if interrupted
+                 if groupchat.admin_name in groupchat.agent_names:
+                     # admin agent is one of the participants
+                     speaker = groupchat.agent_by_name(groupchat.admin_name)
+                     reply = speaker.generate_reply(sender=self)
+                 else:
+                     # admin agent is not found in the participants
+                     raise
+             if reply is None:
+                 exit()
+             # if the agent can learn, let it learn from the user's feedback
+             if hasattr(speaker, 'learn_from_user_feedback'):
+                 speaker.learn_from_user_feedback()
+             # The speaker sends the message without requesting a reply
+             speaker.send(reply, self, request_reply=False)
+             message = self.last_message(speaker)
+         return True, None
autogensop/coor_retrieve_agent.py ADDED
@@ -0,0 +1,170 @@
+ import chromadb
+ from autogen import AssistantAgent, Agent
+ from typing import Dict, Optional, Union, List, Tuple, Any
+ 
+ from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent
+ from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
+ from autogen.agentchat.contrib.text_analyzer_agent import TextAnalyzerAgent
+ 
+ 
+ class CoorRetrieveGoodsAgent(AssistantAgent):
+     def __init__(
+         self,
+         name: str,
+         system_message: Optional[str],
+         llm_config: Optional[Union[Dict, bool]],
+         **kwargs
+     ):
+         super().__init__(
+             name,
+             system_message,
+             llm_config=llm_config,
+             **kwargs
+         )
+ 
+         self.analyzer = TextAnalyzerAgent(llm_config=llm_config)
+         self.ragproxyagent = RetrieveUserProxyAgent(
+             name="ragproxyagent",
+             human_input_mode="NEVER",
+             max_consecutive_auto_reply=10,
+             retrieve_config={
+                 "customized_prompt": """You're a retrieve augmented chatbot. You answer user's questions based on the
+ context provided by the user.
+ If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.
+ You must give as complete an answer as possible.
+ 
+ User's question is: {input_question}
+ 
+ Context is: {input_context}
+ """,
+                 "task": 'qa',
+                 "collection_name": "goods_line",
+                 "model": self.llm_config["config_list"][0]["model"],
+                 "client": chromadb.PersistentClient(path="/tmp/chromadb"),
+                 "embedding_model": "all-mpnet-base-v2",
+                 # "get_or_create": False,  # set to True if you want to recreate the collection
+                 # OPTION: enable when creating the collection for the first time
+                 "docs_path": "./autogensop/price&goods_line.txt",  # one product per line
+                 "get_or_create": True,
+                 "chunk_mode": "one_line",
+                 # OPTION: when the prompt contains few-shots, an answer prefix can be enabled
+                 # "customized_answer_prefix": 'The answer is'
+             },
+         )
+         self.assistant = RetrieveAssistantAgent(
+             name="assistant",
+             system_message=self.system_message + "Only return all products that meet the question.",
+             # system_message="You are a helpful assistant. Return all products that meet the question. ",
+             llm_config={
+                 # "request_timeout": 600,
+                 "seed": 42,
+                 "config_list": self.llm_config['config_list'],
+             },
+         )
+         self.register_reply(Agent, CoorRetrieveGoodsAgent._generate_retrieve_goods_reply)
+ 
+     def _generate_retrieve_goods_reply(
+         self,
+         messages: Optional[List[Dict]] = None,
+         sender: Optional[Agent] = None,
+         config: Optional[Any] = None,
+         good_name: Optional[Any] = None,
+     ) -> Tuple[bool, Union[str, Dict, None]]:
+         if config is None:
+             config = self
+         if messages is None:
+             messages = self._oai_messages[sender]
+         # prompt = messages + [{
+         #     "role": "system",
+         #     "content": "Analyze what product the user wants in the TEXT. Only return the shortest keyword name of the product. ",
+         # }]
+         # final, good_name = self.generate_oai_reply(prompt)
+         # prompt = messages + [{
+         #     "role": "system",
+         #     "content": f"You can't consider Salesperson needs or generate need out of thin air. Only consider User needs for the {good_name}, the needs like 'besides...' are important. Then ask assistant to return recommended products, which including all user needs, in one question in TEXT's language. Only return that question as short as possible. ",
+         # }]
+         # final, question = self.generate_oai_reply(prompt)
+         print(messages)
+         conversation = [message.get("name", "") + ':' + message.get("content", "") + '\n' for message in messages]
+         conversation = '\n'.join(conversation)
+         # print(conversation)
+         if not good_name:
+             good_name = self.analyzer.analyze_text(conversation,
+                                                    "Analyze what product the user wants in the TEXT. Only return the shortest keyword name of the product. ")
+         question = self.analyzer.analyze_text(conversation,
+                                               f"You can't consider Salesperson needs or generate need out of thin air. Only consider User needs for the {good_name}. Then ask assistant to return recommended products, which including all user needs, in one question in TEXT's language. Only return that question as short as possible. ")
+ 
+         self.ragproxyagent.initiate_chat(self.assistant, problem=question, search_string=good_name)
+         # ragproxyagent.initiate_chat(assistant, problem=question)
+         if self.assistant.last_message()["content"] == "TERMINATE":
+             return True, "没有找到您想要的商品,如有其他需要请再告诉我。"
+         else:
+             return True, self.assistant.last_message()["content"]
+ 
+ 
+ class CoorRetrieveQAsAgent(AssistantAgent):
+     def __init__(
+         self,
+         name: str,
+         system_message: Optional[str],
+         llm_config: Optional[Union[Dict, bool]],
+         **kwargs
+     ):
+         super().__init__(
+             name,
+             system_message,
+             llm_config=llm_config,
+             **kwargs
+         )
+         self.register_reply(Agent, CoorRetrieveQAsAgent._generate_retrieve_qas_reply)
+ 
+     def _generate_retrieve_qas_reply(
+         self,
+         messages: Optional[List[Dict]] = None,
+         sender: Optional[Agent] = None,
+         config: Optional[Any] = None,
+         good_name: Optional[Any] = None,
+     ) -> Tuple[bool, Union[str, Dict, None]]:
+         if config is None:
+             config = self
+         if messages is None:
+             messages = self._oai_messages[sender]
+         question = messages[-1].get("content", "")
+         CUSTOMIZED_RETRIEVE_PROMPT = """You're a retrieve augmented chatbot as seller. You answer user's questions based on your own knowledge and the context provided by the user. You must think step-by-step.
+ First, please learn the examples in the context.
+ Context:{input_context}
+ 
+ Second, please answer user's question as seller referring to the best examples. Only return the answer.
+ User's question:{input_question}
+ """
+         ragproxyagent = RetrieveUserProxyAgent(
+             name="ragproxyagent",
+             human_input_mode="NEVER",
+             max_consecutive_auto_reply=10,
+             retrieve_config={
+                 "customized_prompt": CUSTOMIZED_RETRIEVE_PROMPT,
+                 "task": 'qa',
+                 "collection_name": "QAs",
+                 "model": self.llm_config["config_list"][0]["model"],
+                 "client": chromadb.PersistentClient(path="/tmp/chromadb"),
+                 "embedding_model": "all-mpnet-base-v2",
+                 "get_or_create": True,  # set to True if you want to recreate the collection
+                 # OPTION: with get_or_create=True, enable when creating the collection for the first time
+                 "docs_path": "./autogensop/QAs.txt",
+                 "chunk_mode": "multi_lines",
+                 "must_break_at_empty_line": True,
+                 # OPTION: when the prompt contains few-shots, an answer prefix can be enabled
+                 # "customized_answer_prefix": 'The answer is'
+             },
+         )
+         assistant = RetrieveAssistantAgent(
+             name="assistant",
+             system_message="You are a helpful assistant. ",
+             llm_config={
+                 # "request_timeout": 600,
+                 "seed": 42,
+                 "config_list": self.llm_config["config_list"],
+             },
+         )
+         ragproxyagent.initiate_chat(assistant, problem=question, search_string=good_name)
+         return True, assistant.last_message()["content"]
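
For orientation, a rough sketch of exercising the goods-retrieval agent on its own, assuming ./autogensop/price&goods_line.txt exists with one product per line (the LLM entry and the question are illustrative):

import autogen
from autogensop.coor_retrieve_agent import CoorRetrieveGoodsAgent

llm_entry = {"model": "gpt-4-1106-preview", "api_key": "sk-..."}  # placeholder credentials

warehouse = CoorRetrieveGoodsAgent(
    name="Warehouse",
    system_message="If the previous role was User and he proposed to buy something, you can return the goods. ",
    llm_config={"config_list": [llm_entry]},
)
user = autogen.UserProxyAgent(name="User", human_input_mode="NEVER", code_execution_config=False,
                              max_consecutive_auto_reply=0)

# The registered reply function extracts a product keyword from the conversation, queries the
# "goods_line" Chroma collection, and answers with the matching products.
user.initiate_chat(warehouse, message="我想买一把雨伞,有哪些选择?")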
autogensop/gradio_autogen_sop.py ADDED
@@ -0,0 +1,126 @@
+ import autogen
+ from autogen.agentchat.contrib.text_analyzer_agent import TextAnalyzerAgent
+ 
+ from autogensop.gradio_chat_manager import GroupChat, GroupChatManager
+ 
+ 
+ class GradioAutogenSop(autogen.ConversableAgent):
+     def __init__(
+         self,
+         target="",
+         states=[],
+         agents=[],
+         user_name="",
+         llm_config={},
+         max_user_input=100,
+         **kwargs,
+     ) -> None:
+         super().__init__(
+             name="State manager",
+             human_input_mode="NEVER",
+             llm_config=llm_config,
+             system_message="You are a state manager",
+             **kwargs,
+         )
+         self.target = target
+         self.states = states
+         self.agents = agents
+         self.user_name = user_name
+         self.llm_config = llm_config
+         self.max_user_input = max_user_input
+         self.groupchat = autogen.GroupChat(agents, messages=[])
+         self.user = self.groupchat.agent_by_name(self.user_name)
+         self.manager = GroupChatManager(self.groupchat, llm_config=self.llm_config)
+         self.messages = []
+         self.state_groupchat = None
+ 
+     def _state_start_condition(self):
+         return "\n".join([f"{key}: {item['start_condition']}" for key, item in self.states.items()])
+ 
+     def _state_task(self):
+         return "\n".join([f"{key}: {item['task']}" for key, item in self.states.items()])
+ 
+     def judge_target_reached(self):
+         # Decide whether self.target has been reached
+         rule = f"""Your judgment condition is {self.target} If the judgment condition is reached, only return: EXIT, else only return: CONTINUE. """
+         prompt = self.messages + [{
+             "role": "system",
+             "content": rule,
+         }]
+         final, res = self.generate_oai_reply(prompt)
+         print('***************target_reached:' + res)
+         if final:
+             if 'EXIT' in res:
+                 return True
+             else:
+                 return False
+         else:
+             print('judge_target_reached 错误')
+             exit()
+ 
+     def select_state(self):
+         # Decide which state to enter
+         states_rule = f"""Your ultimate goal is {self.target} The optional states are as follows:
+ {self._state_start_condition()}.
+ Read the above conversation. Then select the next state from {[key for key in self.states]}. Only return the state."""
+         prompt = self.messages[-4:] + [{
+             "role": "system",
+             "content": states_rule,
+         }]
+         print('【判断进入的state】')
+         print(prompt)
+         final, name = self.generate_oai_reply(prompt)
+         while name not in list(self.states.keys()):
+             prompt += [{
+                 "role": "system",
+                 "content": name + " is a wrong answer",
+             }]
+             final, name = self.generate_oai_reply(prompt)
+         print('************进入:' + name + '阶段************\n')
+         if final:
+             return name
+         else:
+             print('select_state 错误')
+             exit()
+ 
+     def get_response(self, history):
+         while True:
+             print('【选择发言人】')
+             speaker = self.manager.groupchat.select_speaker(self.user, self.manager)
+             print('************选择发言的是:' + speaker.name + '**************\n')
+             if speaker.name == self.user.name:
+                 break
+             # if the agent can learn, let it learn from the user's feedback
+             if hasattr(speaker, 'learn_from_user_feedback'):
+                 speaker.learn_from_user_feedback()
+             reply = speaker.generate_reply(messages=self.messages, sender=self.manager)
+             self.manager.broadcast(reply, speaker)
+             message = self.manager.chat_messages[self.user][-1]
+             message["name"] = speaker.name
+             self.messages.append(message)
+             self.manager.groupchat.messages = self.messages
+             history += [[None, " **" + speaker.name + ":** " + reply]]
+             yield history
+         yield history
+ 
+     def ready_response(self, msg, history):
+         self.user.send(message=msg, recipient=self.manager, request_reply=False)
+         message = self.manager.chat_messages[self.user][-1]
+         message["name"] = self.user.name
+         self.messages.append(message)
+         state_name = self.select_state()
+         # Update the participants' sys_msg for this state by appending the stage goal
+         for agent_name, sys_msg in self.states[state_name]['sys_msg'].items():
+             agent = self.groupchat.agent_by_name(agent_name)
+             if sys_msg not in agent.system_message:
+                 agent.update_system_message(agent.system_message + '此阶段你的目标是:' + sys_msg)
+         # Create a group chat for this state that inherits the existing conversation
+         self.state_groupchat = GroupChat(
+             [self.groupchat.agent_by_name(agent_name) for agent_name in
+              self.states[state_name]['participate_agent_names']],
+             messages=self.messages,
+             max_round=4
+         )
+         self.manager.groupchat = self.state_groupchat
+         self.manager.broadcast(msg, self.user)
+         return "", history + [[msg, None]]
autogensop/gradio_chat_manager.py ADDED
@@ -0,0 +1,142 @@
+ from dataclasses import dataclass
+ import sys
+ from typing import Dict, List, Optional, Union
+ import logging
+ 
+ from autogen import Agent, ConversableAgent
+ 
+ logger = logging.getLogger(__name__)
+ 
+ 
+ @dataclass
+ class GroupChat:
+     """A group chat class that contains the following data fields:
+     - agents: a list of participating agents.
+     - messages: a list of messages in the group chat.
+     - max_round: the maximum number of rounds.
+     - admin_name: the name of the admin agent if there is one. Default is "Admin".
+         KeyBoardInterrupt will make the admin agent take over.
+     - func_call_filter: whether to enforce function call filter. Default is True.
+         When set to True and when a message is a function call suggestion,
+         the next speaker will be chosen from an agent which contains the corresponding function name
+         in its `function_map`.
+     """
+ 
+     agents: List[Agent]
+     messages: List[Dict]
+     max_round: int = 10
+     admin_name: str = "Admin"
+     func_call_filter: bool = True
+ 
+     @property
+     def agent_names(self) -> List[str]:
+         """Return the names of the agents in the group chat."""
+         return [agent.name for agent in self.agents]
+ 
+     def reset(self):
+         """Reset the group chat."""
+         self.messages.clear()
+ 
+     def agent_by_name(self, name: str) -> Agent:
+         """Return the agent with the given name."""
+         return self.agents[self.agent_names.index(name)]
+ 
+     def next_agent(self, agent: Agent, agents: List[Agent]) -> Agent:
+         """Return the next agent in the list."""
+         if agents == self.agents:
+             return agents[(self.agent_names.index(agent.name) + 1) % len(agents)]
+         else:
+             offset = self.agent_names.index(agent.name) + 1
+             for i in range(len(self.agents)):
+                 if self.agents[(offset + i) % len(self.agents)] in agents:
+                     return self.agents[(offset + i) % len(self.agents)]
+ 
+     def select_speaker_msg(self, agents: List[Agent]):
+         """Return the message for selecting the next speaker."""
+         return f"""You are in a role play game. The following roles are available:
+ {self._participant_roles()}.
+ Ignoring the order in which the above roles appear.
+ Think about the dependency relationships between different roles.
+ Read the following conversation.
+ Then select the next role from {[agent.name for agent in agents]} to play. Only return the role."""
+ 
+     def select_speaker(self, last_speaker: Agent, selector: ConversableAgent):
+         """Select the next speaker."""
+         if self.func_call_filter and self.messages and "function_call" in self.messages[-1]:
+             # find agents with the right function_map which contains the function name
+             agents = [
+                 agent for agent in self.agents if agent.can_execute_function(self.messages[-1]["function_call"]["name"])
+             ]
+             if len(agents) == 1:
+                 # only one agent can execute the function
+                 return agents[0]
+             elif not agents:
+                 # find all the agents with function_map
+                 agents = [agent for agent in self.agents if agent.function_map]
+                 if len(agents) == 1:
+                     return agents[0]
+                 elif not agents:
+                     raise ValueError(
+                         f"No agent can execute the function {self.messages[-1]['name']}. "
+                         "Please check the function_map of the agents."
+                     )
+         else:
+             agents = self.agents
+             # Warn if GroupChat is underpopulated
+             n_agents = len(agents)
+             if n_agents < 3:
+                 logger.warning(
+                     f"GroupChat is underpopulated with {n_agents} agents. Direct communication would be more efficient."
+                 )
+         selector.update_system_message(self.select_speaker_msg(agents))
+         prompt = self.messages[-5:] + [{
+             "role": "system",
+             "content": f"Read the above conversation. Then select the next role from {[agent.name for agent in agents]} to play. Only return the role.",
+         }]
+         print(prompt)
+         final, name = selector.generate_oai_reply(
+             # select the next speaker based on the last five messages
+             prompt
+         )
+         if not final:
+             # i = self._random.randint(0, len(self._agent_names) - 1)  # randomly pick an id
+             return self.next_agent(last_speaker, agents)
+         try:
+             return self.agent_by_name(name)
+         except ValueError:
+             return self.next_agent(last_speaker, agents)
+ 
+     def _participant_roles(self):
+         return "\n".join([f"{agent.name}: {agent.system_message}" for agent in self.agents])
+ 
+ 
+ class GroupChatManager(ConversableAgent):
+     """(In preview) A chat manager agent that can manage a group chat of multiple agents."""
+ 
+     def __init__(
+         self,
+         groupchat: GroupChat,
+         name: Optional[str] = "chat_manager",
+         max_consecutive_auto_reply: Optional[int] = sys.maxsize,
+         human_input_mode: Optional[str] = "NEVER",
+         system_message: Optional[str] = "Group chat manager.",
+         **kwargs,
+     ):
+         super().__init__(
+             name=name,
+             max_consecutive_auto_reply=max_consecutive_auto_reply,
+             human_input_mode=human_input_mode,
+             system_message=system_message,
+             **kwargs,
+         )
+         self.groupchat = groupchat
+         self.update_system_message(self.groupchat.select_speaker_msg(self.groupchat.agents))
+ 
+     def broadcast(
+         self,
+         message: Optional[str] = None,
+         sender: Optional[Agent] = None,
+     ) -> Union[str, Dict, None]:
+         for agent in self.groupchat.agents:
+             if agent != sender:
+                 self.send(message, agent, request_reply=False)
autogensop/price&goods_line.txt ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ chromadb==0.4.15
+ gradio==4.19.1
+ pyautogen==0.2.0
+ openai==1.3.6
+ pypdf==3.17.0
+ IPython==8.17.2
+ sentence_transformers==2.2.2
+ 