Tuchuanhuhuhu committed on
Commit
3742fa4
·
1 Parent(s): be9f42e

更新依赖版本

Browse files
ChuanhuChatbot.py CHANGED
@@ -87,7 +87,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
87
  label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True, visible=False
88
  )
89
  with gr.Row():
90
- autogpt_mode = gr.Checkbox(label=i18n("AutoGPT 模式"), value=True)
91
  single_turn_checkbox = gr.Checkbox(label=i18n("单轮对话"), value=False)
92
  use_websearch_checkbox = gr.Checkbox(label=i18n("使用在线搜索"), value=False)
93
  # render_latex_checkbox = gr.Checkbox(label=i18n("渲染LaTeX公式"), value=render_latex, interactive=True, elem_id="render_latex_checkbox")
@@ -288,6 +288,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
288
  chatbot,
289
  use_streaming_checkbox,
290
  use_websearch_checkbox,
 
291
  index_files,
292
  language_select_dropdown,
293
  ],
@@ -350,6 +351,7 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
350
  chatbot,
351
  use_streaming_checkbox,
352
  use_websearch_checkbox,
 
353
  index_files,
354
  language_select_dropdown,
355
  ],
 
87
  label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True, visible=False
88
  )
89
  with gr.Row():
90
+ autogpt_mode = gr.Checkbox(label=i18n("AutoGPT 模式"), value=False)
91
  single_turn_checkbox = gr.Checkbox(label=i18n("单轮对话"), value=False)
92
  use_websearch_checkbox = gr.Checkbox(label=i18n("使用在线搜索"), value=False)
93
  # render_latex_checkbox = gr.Checkbox(label=i18n("渲染LaTeX公式"), value=render_latex, interactive=True, elem_id="render_latex_checkbox")
 
288
  chatbot,
289
  use_streaming_checkbox,
290
  use_websearch_checkbox,
291
+ autogpt_mode,
292
  index_files,
293
  language_select_dropdown,
294
  ],
 
351
  chatbot,
352
  use_streaming_checkbox,
353
  use_websearch_checkbox,
354
+ autogpt_mode,
355
  index_files,
356
  language_select_dropdown,
357
  ],
modules/llama_func.py CHANGED
@@ -107,7 +107,7 @@ def construct_index(
107
  ):
108
  from langchain.chat_models import ChatOpenAI
109
  from langchain.embeddings.huggingface import HuggingFaceEmbeddings
110
- from llama_index import GPTSimpleVectorIndex, ServiceContext, LangchainEmbedding, OpenAIEmbedding
111
 
112
  if api_key:
113
  os.environ["OPENAI_API_KEY"] = api_key
@@ -129,7 +129,7 @@ def construct_index(
129
  index_name = get_index_name(file_src)
130
  if os.path.exists(f"./index/{index_name}.json"):
131
  logging.info("找到了缓存的索引文件,加载中……")
132
- return GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json")
133
  else:
134
  try:
135
  documents = get_documents(file_src)
@@ -144,12 +144,12 @@ def construct_index(
144
  chunk_size_limit=chunk_size_limit,
145
  embed_model=embed_model,
146
  )
147
- index = GPTSimpleVectorIndex.from_documents(
148
  documents, service_context=service_context
149
  )
150
  logging.debug("索引构建完成!")
151
  os.makedirs("./index", exist_ok=True)
152
- index.save_to_disk(f"./index/{index_name}.json")
153
  logging.debug("索引已保存至本地!")
154
  return index
155
 
 
107
  ):
108
  from langchain.chat_models import ChatOpenAI
109
  from langchain.embeddings.huggingface import HuggingFaceEmbeddings
110
+ from llama_index import GPTVectorStoreIndex, ServiceContext, LangchainEmbedding, OpenAIEmbedding
111
 
112
  if api_key:
113
  os.environ["OPENAI_API_KEY"] = api_key
 
129
  index_name = get_index_name(file_src)
130
  if os.path.exists(f"./index/{index_name}.json"):
131
  logging.info("找到了缓存的索引文件,加载中……")
132
+ return GPTVectorStoreIndex.load_from_disk(f"./index/{index_name}.json")
133
  else:
134
  try:
135
  documents = get_documents(file_src)
 
144
  chunk_size_limit=chunk_size_limit,
145
  embed_model=embed_model,
146
  )
147
+ index = GPTVectorStoreIndex.from_documents(
148
  documents, service_context=service_context
149
  )
150
  logging.debug("索引构建完成!")
151
  os.makedirs("./index", exist_ok=True)
152
+ index.storage_context.persist(f"./index/{index_name}")
153
  logging.debug("索引已保存至本地!")
154
  return index
155
 
modules/models/base_model.py CHANGED
@@ -277,6 +277,7 @@ class BaseLLMModel:
277
  chatbot,
278
  stream=False,
279
  use_websearch=False,
 
280
  files=None,
281
  reply_language="中文",
282
  should_check_token_count=True,
@@ -383,6 +384,7 @@ class BaseLLMModel:
383
  chatbot,
384
  stream=False,
385
  use_websearch=False,
 
386
  files=None,
387
  reply_language="中文",
388
  ):
@@ -402,6 +404,7 @@ class BaseLLMModel:
402
  chatbot,
403
  stream=stream,
404
  use_websearch=use_websearch,
 
405
  files=files,
406
  reply_language=reply_language,
407
  )
 
277
  chatbot,
278
  stream=False,
279
  use_websearch=False,
280
+ autogpt_mode=False,
281
  files=None,
282
  reply_language="中文",
283
  should_check_token_count=True,
 
384
  chatbot,
385
  stream=False,
386
  use_websearch=False,
387
+ autogpt_mode=False,
388
  files=None,
389
  reply_language="中文",
390
  ):
 
404
  chatbot,
405
  stream=stream,
406
  use_websearch=use_websearch,
407
+ autogpt_mode=autogpt_mode,
408
  files=files,
409
  reply_language=reply_language,
410
  )
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- gradio==3.28.0
2
  gradio_client==0.1.4
3
  mdtex2html
4
  pypinyin
@@ -8,8 +8,8 @@ tqdm
8
  colorama
9
  duckduckgo_search==2.9.5
10
  Pygments
11
- llama_index==0.5.25
12
- langchain<0.0.150
13
  markdown
14
  PyPDF2
15
  pdfplumber
 
1
+ gradio==3.30.0
2
  gradio_client==0.1.4
3
  mdtex2html
4
  pypinyin
 
8
  colorama
9
  duckduckgo_search==2.9.5
10
  Pygments
11
+ llama_index==0.6.8
12
+ langchain==0.0.170
13
  markdown
14
  PyPDF2
15
  pdfplumber