Tuchuanhuhuhu committed on
Commit
fab043c
·
1 Parent(s): c8fd9d2

禁用自动总结功能

Browse files
Files changed (1) hide show
  1. modules/chat_func.py +25 -25
modules/chat_func.py CHANGED
@@ -63,7 +63,7 @@ def get_response(
63
  # 如果有自定义的api-url,使用自定义url发送请求,否则使用默认设置发送请求
64
  if shared.state.api_url != API_URL:
65
  logging.info(f"使用自定义API URL: {shared.state.api_url}")
66
-
67
  response = requests.post(
68
  shared.state.api_url,
69
  headers=headers,
@@ -72,7 +72,7 @@ def get_response(
72
  timeout=timeout,
73
  proxies=proxies,
74
  )
75
-
76
  return response
77
 
78
 
@@ -350,29 +350,29 @@ def predict(
350
  + colorama.Style.RESET_ALL
351
  )
352
 
353
- if stream:
354
- max_token = max_token_streaming
355
- else:
356
- max_token = max_token_all
357
-
358
- if sum(all_token_counts) > max_token and should_check_token_count:
359
- status_text = f"精简token中{all_token_counts}/{max_token}"
360
- logging.info(status_text)
361
- yield chatbot, history, status_text, all_token_counts
362
- iter = reduce_token_size(
363
- openai_api_key,
364
- system_prompt,
365
- history,
366
- chatbot,
367
- all_token_counts,
368
- top_p,
369
- temperature,
370
- max_token//2,
371
- selected_model=selected_model,
372
- )
373
- for chatbot, history, status_text, all_token_counts in iter:
374
- status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}"
375
- yield chatbot, history, status_text, all_token_counts
376
 
377
 
378
  def retry(
 
63
  # 如果有自定义的api-url,使用自定义url发送请求,否则使用默认设置发送请求
64
  if shared.state.api_url != API_URL:
65
  logging.info(f"使用自定义API URL: {shared.state.api_url}")
66
+
67
  response = requests.post(
68
  shared.state.api_url,
69
  headers=headers,
 
72
  timeout=timeout,
73
  proxies=proxies,
74
  )
75
+
76
  return response
77
 
78
 
 
350
  + colorama.Style.RESET_ALL
351
  )
352
 
353
+ # if stream:
354
+ # max_token = max_token_streaming
355
+ # else:
356
+ # max_token = max_token_all
357
+
358
+ # if sum(all_token_counts) > max_token and should_check_token_count:
359
+ # status_text = f"精简token中{all_token_counts}/{max_token}"
360
+ # logging.info(status_text)
361
+ # yield chatbot, history, status_text, all_token_counts
362
+ # iter = reduce_token_size(
363
+ # openai_api_key,
364
+ # system_prompt,
365
+ # history,
366
+ # chatbot,
367
+ # all_token_counts,
368
+ # top_p,
369
+ # temperature,
370
+ # max_token//2,
371
+ # selected_model=selected_model,
372
+ # )
373
+ # for chatbot, history, status_text, all_token_counts in iter:
374
+ # status_text = f"Token 达到上限,已自动降低Token计数至 {status_text}"
375
+ # yield chatbot, history, status_text, all_token_counts
376
 
377
 
378
  def retry(