Tuchuanhuhuhu committed on
Commit
386dd02
·
1 Parent(s): f504a7f

优化川虎助理显示

Browse files
Files changed (1) hide show
  1. modules/models/base_model.py +23 -4
modules/models/base_model.py CHANGED
@@ -62,6 +62,19 @@ class CallbackToIterator:
62
  self.finished = True
63
  self.cond.notify() # Wake up the generator if it's waiting.
64
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  class ChuanhuCallbackHandler(BaseCallbackHandler):
66
 
67
  def __init__(self, callback) -> None:
@@ -71,7 +84,7 @@ class ChuanhuCallbackHandler(BaseCallbackHandler):
71
  def on_agent_action(
72
  self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
73
  ) -> Any:
74
- self.callback(action.log)
75
 
76
  def on_tool_end(
77
  self,
@@ -82,16 +95,22 @@ class ChuanhuCallbackHandler(BaseCallbackHandler):
82
  **kwargs: Any,
83
  ) -> None:
84
  """If not the final action, print out observation."""
 
 
 
 
 
85
  if observation_prefix is not None:
86
- self.callback(f"\n\n{observation_prefix}")
87
  self.callback(output)
88
  if llm_prefix is not None:
89
- self.callback(f"\n\n{llm_prefix}")
90
 
91
  def on_agent_finish(
92
  self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
93
  ) -> None:
94
- self.callback(f"{finish.log}\n\n")
 
95
 
96
  def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
97
  """Run on new LLM token. Only available when streaming is enabled."""
 
62
  self.finished = True
63
  self.cond.notify() # Wake up the generator if it's waiting.
64
 
65
def get_action_description(text):
    """Extract the agent action from *text* and format it as a muted HTML note.

    The agent log is expected to contain a fenced code block (```` ``` ````)
    holding a JSON object with 'action' and 'action_input' keys.

    Returns an empty string when no fenced block is present, when the JSON
    payload is malformed or missing the expected keys, or when the action is
    the terminal "Final Answer" (which is rendered elsewhere).
    """
    match = re.search(r'```(.*?)```', text, re.S)
    if match is None:
        # No fenced block in the log — nothing to display.
        return ""
    try:
        # Convert the JSON payload into a Python dict.
        json_dict = json.loads(match.group(1))
        # Pull out the 'action' and 'action_input' values.
        action_name = json_dict['action']
        action_input = json_dict['action_input']
    except (json.JSONDecodeError, KeyError, TypeError):
        # Malformed or unexpected payload — fail quietly rather than crash the UI.
        return ""
    if action_name != "Final Answer":
        return f'<p style="font-size: smaller; color: gray;">{action_name}: {action_input}</p>'
    return ""
78
  class ChuanhuCallbackHandler(BaseCallbackHandler):
79
 
80
  def __init__(self, callback) -> None:
 
84
def on_agent_action(
    self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
) -> Any:
    """Render the agent's chosen action as a muted HTML note via the callback."""
    description = get_action_description(action.log)
    self.callback(description)
88
 
89
  def on_tool_end(
90
  self,
 
95
  **kwargs: Any,
96
  ) -> None:
97
  """If not the final action, print out observation."""
98
+ # if observation_prefix is not None:
99
+ # self.callback(f"\n\n{observation_prefix}")
100
+ # self.callback(output)
101
+ # if llm_prefix is not None:
102
+ # self.callback(f"\n\n{llm_prefix}")
103
  if observation_prefix is not None:
104
+ logging.info(observation_prefix)
105
  self.callback(output)
106
  if llm_prefix is not None:
107
+ logging.info(llm_prefix)
108
 
109
  def on_agent_finish(
110
  self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
111
  ) -> None:
112
+ # self.callback(f"{finish.log}\n\n")
113
+ logging.info(finish.log)
114
 
115
  def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
116
  """Run on new LLM token. Only available when streaming is enabled."""