curry tang committed on
Commit
bffe050
1 Parent(s): 9458b7d
Files changed (2) hide show
  1. app.py +21 -21
  2. requirements.txt +3 -2
app.py CHANGED
@@ -27,28 +27,28 @@ def get_default_chat():
27
  return _llm.get_chat_engine()
28
 
29
 
30
- def predict(message, history, chat, _current_assistant):
31
- print('!!!!!', message, history, chat, _current_assistant)
32
- history_len = len(history)
33
  files_len = len(message.files)
34
- if chat is None:
35
- chat = get_default_chat()
36
- history_messages = []
37
- for human, assistant in history:
38
- history_messages.append(HumanMessage(content=human))
39
- if assistant is not None:
40
- history_messages.append(AIMessage(content=assistant))
41
-
42
- if history_len == 0:
43
- assistant_prompt = web_prompt
44
- if _current_assistant == '后端开发助手':
45
- assistant_prompt = backend_developer_prompt
46
- if _current_assistant == '数据分析师':
47
- assistant_prompt = analyst_prompt
48
- history_messages.append(SystemMessage(content=assistant_prompt))
 
49
 
50
  if files_len == 0:
51
- history_messages.append(HumanMessage(content=message.text))
52
  else:
53
  file = message.files[0]
54
  with Image.open(file.path) as img:
@@ -56,13 +56,13 @@ def predict(message, history, chat, _current_assistant):
56
  img = img.convert('RGB')
57
  img.save(buffer, format="JPEG")
58
  image_data = base64.b64encode(buffer.getvalue()).decode("utf-8")
59
- history_messages.append(HumanMessage(content=[
60
  {"type": "text", "text": message.text},
61
  {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}}
62
  ]))
63
 
64
  response_message = ''
65
- for chunk in chat.stream(history_messages):
66
  response_message = response_message + chunk.content
67
  yield response_message
68
 
 
27
  return _llm.get_chat_engine()
28
 
29
 
30
+ def predict(message, history, _chat, _current_assistant):
31
+ print('!!!!!', message, history, _chat, _current_assistant)
 
32
  files_len = len(message.files)
33
+ if _chat is None:
34
+ _chat = get_default_chat()
35
+ _lc_history = []
36
+
37
+ assistant_prompt = web_prompt
38
+ if _current_assistant == '后端开发助手':
39
+ assistant_prompt = backend_developer_prompt
40
+ if _current_assistant == '数据分析师':
41
+ assistant_prompt = analyst_prompt
42
+ _lc_history.append(SystemMessage(content=assistant_prompt))
43
+
44
+ for his_msg in history:
45
+ if his_msg['role'] == 'user':
46
+ _lc_history.append(HumanMessage(content=his_msg['content']))
47
+ if his_msg['role'] == 'assistant':
48
+ _lc_history.append(AIMessage(content=his_msg['content']))
49
 
50
  if files_len == 0:
51
+ _lc_history.append(HumanMessage(content=message.text))
52
  else:
53
  file = message.files[0]
54
  with Image.open(file.path) as img:
 
56
  img = img.convert('RGB')
57
  img.save(buffer, format="JPEG")
58
  image_data = base64.b64encode(buffer.getvalue()).decode("utf-8")
59
+ _lc_history.append(HumanMessage(content=[
60
  {"type": "text", "text": message.text},
61
  {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}}
62
  ]))
63
 
64
  response_message = ''
65
+ for chunk in _chat.stream(_lc_history):
66
  response_message = response_message + chunk.content
67
  yield response_message
68
 
requirements.txt CHANGED
@@ -97,11 +97,11 @@ kiwisolver==1.4.5
97
  # via matplotlib
98
  langchain==0.2.11
99
  # via startship
100
- langchain-core==0.2.23
101
  # via langchain
102
  # via langchain-openai
103
  # via langchain-text-splitters
104
- langchain-openai==0.1.17
105
  # via startship
106
  langchain-text-splitters==0.2.2
107
  # via langchain
@@ -224,6 +224,7 @@ typing-extensions==4.12.2
224
  # via gradio
225
  # via gradio-client
226
  # via huggingface-hub
 
227
  # via openai
228
  # via pydantic
229
  # via pydantic-core
 
97
  # via matplotlib
98
  langchain==0.2.11
99
  # via startship
100
+ langchain-core==0.2.28
101
  # via langchain
102
  # via langchain-openai
103
  # via langchain-text-splitters
104
+ langchain-openai==0.1.20
105
  # via startship
106
  langchain-text-splitters==0.2.2
107
  # via langchain
 
224
  # via gradio
225
  # via gradio-client
226
  # via huggingface-hub
227
+ # via langchain-core
228
  # via openai
229
  # via pydantic
230
  # via pydantic-core