Spaces:
Sleeping
Sleeping
Tuchuanhuhuhu
committed on
Commit
·
3545d4e
1
Parent(s):
3c2937a
搜索模式中,将链接附在AI回复后面
Browse files — chat_func.py (+10, -14)
chat_func.py
CHANGED
@@ -102,7 +102,8 @@ def stream_predict(
|
|
102 |
top_p,
|
103 |
temperature,
|
104 |
selected_model,
|
105 |
-
fake_input=None
|
|
|
106 |
):
|
107 |
def get_return_value():
|
108 |
return chatbot, history, status_text, all_token_counts
|
@@ -191,7 +192,7 @@ def stream_predict(
|
|
191 |
yield get_return_value()
|
192 |
break
|
193 |
history[-1] = construct_assistant(partial_words)
|
194 |
-
chatbot[-1] = (chatbot[-1][0], parse_text(partial_words))
|
195 |
all_token_counts[-1] += 1
|
196 |
yield get_return_value()
|
197 |
|
@@ -206,7 +207,8 @@ def predict_all(
|
|
206 |
top_p,
|
207 |
temperature,
|
208 |
selected_model,
|
209 |
-
fake_input=None
|
|
|
210 |
):
|
211 |
logging.info("一次性回答模式")
|
212 |
history.append(construct_user(inputs))
|
@@ -240,7 +242,7 @@ def predict_all(
|
|
240 |
response = json.loads(response.text)
|
241 |
content = response["choices"][0]["message"]["content"]
|
242 |
history[-1] = construct_assistant(content)
|
243 |
-
chatbot[-1] = (chatbot[-1][0], parse_text(content))
|
244 |
total_token_count = response["usage"]["total_tokens"]
|
245 |
all_token_counts[-1] = total_token_count - sum(all_token_counts)
|
246 |
status_text = construct_token_message(total_token_count)
|
@@ -284,7 +286,8 @@ def predict(
|
|
284 |
logging.info(f"搜索结果{idx + 1}:{result}")
|
285 |
domain_name = urllib3.util.parse_url(result["href"]).host
|
286 |
web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}')
|
287 |
-
link_references.append(f"
|
|
|
288 |
inputs = (
|
289 |
replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
|
290 |
.replace("{query}", inputs)
|
@@ -318,7 +321,8 @@ def predict(
|
|
318 |
top_p,
|
319 |
temperature,
|
320 |
selected_model,
|
321 |
-
fake_input=old_inputs
|
|
|
322 |
)
|
323 |
for chatbot, history, status_text, all_token_counts in iter:
|
324 |
yield chatbot, history, status_text, all_token_counts
|
@@ -347,14 +351,6 @@ def predict(
|
|
347 |
+ colorama.Style.RESET_ALL
|
348 |
)
|
349 |
|
350 |
-
if use_websearch:
|
351 |
-
response = history[-1]['content']
|
352 |
-
response += "\n\n" + "\n".join(link_references)
|
353 |
-
logging.info(f"Added link references.")
|
354 |
-
logging.info(response)
|
355 |
-
chatbot[-1] = (parse_text(old_inputs), response)
|
356 |
-
yield chatbot, history, status_text, all_token_counts
|
357 |
-
|
358 |
if stream:
|
359 |
max_token = max_token_streaming
|
360 |
else:
|
|
|
102 |
top_p,
|
103 |
temperature,
|
104 |
selected_model,
|
105 |
+
fake_input=None,
|
106 |
+
display_append=""
|
107 |
):
|
108 |
def get_return_value():
|
109 |
return chatbot, history, status_text, all_token_counts
|
|
|
192 |
yield get_return_value()
|
193 |
break
|
194 |
history[-1] = construct_assistant(partial_words)
|
195 |
+
chatbot[-1] = (chatbot[-1][0], parse_text(partial_words+display_append))
|
196 |
all_token_counts[-1] += 1
|
197 |
yield get_return_value()
|
198 |
|
|
|
207 |
top_p,
|
208 |
temperature,
|
209 |
selected_model,
|
210 |
+
fake_input=None,
|
211 |
+
display_append=""
|
212 |
):
|
213 |
logging.info("一次性回答模式")
|
214 |
history.append(construct_user(inputs))
|
|
|
242 |
response = json.loads(response.text)
|
243 |
content = response["choices"][0]["message"]["content"]
|
244 |
history[-1] = construct_assistant(content)
|
245 |
+
chatbot[-1] = (chatbot[-1][0], parse_text(content+display_append))
|
246 |
total_token_count = response["usage"]["total_tokens"]
|
247 |
all_token_counts[-1] = total_token_count - sum(all_token_counts)
|
248 |
status_text = construct_token_message(total_token_count)
|
|
|
286 |
logging.info(f"搜索结果{idx + 1}:{result}")
|
287 |
domain_name = urllib3.util.parse_url(result["href"]).host
|
288 |
web_results.append(f'[{idx+1}]"{result["body"]}"\nURL: {result["href"]}')
|
289 |
+
link_references.append(f"{idx+1}. [{domain_name}]({result['href']})\n")
|
290 |
+
link_references = "\n\n" + "".join(link_references)
|
291 |
inputs = (
|
292 |
replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
|
293 |
.replace("{query}", inputs)
|
|
|
321 |
top_p,
|
322 |
temperature,
|
323 |
selected_model,
|
324 |
+
fake_input=old_inputs,
|
325 |
+
display_append=link_references
|
326 |
)
|
327 |
for chatbot, history, status_text, all_token_counts in iter:
|
328 |
yield chatbot, history, status_text, all_token_counts
|
|
|
351 |
+ colorama.Style.RESET_ALL
|
352 |
)
|
353 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
354 |
if stream:
|
355 |
max_token = max_token_streaming
|
356 |
else:
|