XJFKKK committed on
Commit f89b3b0 · verified · 1 Parent(s): 7e06c4e

Update config_private.py

Files changed (1)
  1. config_private.py +19 -5
config_private.py CHANGED
@@ -31,11 +31,25 @@ else:
     proxies = None

 # [step 3]>> Model selection (Note: LLM_MODEL is the model selected by default; it *must* be included in the AVAIL_LLM_MODELS list)
-LLM_MODEL = "gpt-3.5-turbo-16k" # options ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
-                    "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
-                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-3-turbo",
-                    "gemini-pro", "chatglm3", "claude-2"]
+LLM_MODEL = "gemini-1.5-pro-latest" # options ↓↓↓
+AVAIL_LLM_MODELS = [
+    "@cf/deepseek-ai/deepseek-math-7b-base", "@cf/deepseek-ai/deepseek-math-7b-instruct", "@cf/defog/sqlcoder-7b-2", "@cf/google/gemma-2b-it-lora",
+    "@cf/google/gemma-7b-it-lora", "@cf/meta-llama/llama-2-7b-chat-hf-lora", "@cf/meta/llama-2-7b-chat-fp16", "@cf/meta/llama-2-7b-chat-int8",
+    "@cf/meta/llama-3-8b-instruct", "@cf/microsoft/phi-2", "@cf/mistral/mistral-7b-instruct-v0.1", "@cf/mistral/mistral-7b-instruct-v0.2-lora",
+    "@cf/openchat/openchat-3.5-0106", "@cf/qwen/qwen1.5-0.5b-chat", "@cf/qwen/qwen1.5-1.8b-chat", "@cf/qwen/qwen1.5-14b-chat-awq",
+    "@cf/qwen/qwen1.5-7b-chat-awq", "@cf/thebloke/discolm-german-7b-v1-awq", "@cf/tiiuae/falcon-7b-instruct",
+    "@cf/tinyllama/tinyllama-1.1b-chat-v1.0", "@hf/google/gemma-7b-it", "@hf/mistralai/mistral-7b-instruct-v0.2", "@hf/nexusflow/starling-lm-7b-beta",
+    "@hf/nousresearch/hermes-2-pro-mistral-7b", "@hf/thebloke/deepseek-coder-6.7b-base-awq", "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
+    "@hf/thebloke/llama-2-13b-chat-awq", "@hf/thebloke/llamaguard-7b-awq", "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
+    "@hf/thebloke/neural-chat-7b-v3-1-awq", "@hf/thebloke/openhermes-2.5-mistral-7b-awq", "@hf/thebloke/zephyr-7b-beta-awq", "Atom-13B-Chat",
+    "Atom-1B-Chat", "Atom-7B-Chat", "Doubao-lite-128k", "Doubao-lite-32k", "Doubao-lite-4k", "Doubao-pro-128k", "Doubao-pro-32k",
+    "Doubao-pro-4k", "Llama3-Chinese-8B-Instruct", "Pro/meta-llama/Meta-Llama-3.1-8B-Instruct", "SparkDesk", "SparkDesk-v1.1",
+    "SparkDesk-v2.1", "SparkDesk-v3.1", "SparkDesk-v3.5", "SparkDesk-v4.0", "claude-3-haiku", "dall-e-3",
+    "deepseek-ai/DeepSeek-Coder-V2-Instruct", "deepseek-ai/DeepSeek-V2-Chat", "deepseek-ai/deepseek-llm-67b-chat", "gemini-1.0-pro-001",
+    "gemini-1.0-pro-latest", "gemini-1.0-pro-vision-001", "gemini-1.0-pro-vision-latest", "gemini-1.5-flash-latest", "gemini-1.5-pro-latest",
+    "gemini-ultra", "gpt-3.5-turbo", "llama-3-70b", "meta-llama/Meta-Llama-3.1-405B-Instruct", "meta-llama/Meta-Llama-3.1-70B-Instruct",
+    "mixtral-8x7b"
+]
 # --- --- --- ---
 # P.S. Other available models also include
 # AVAIL_LLM_MODELS = [
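
For reference, a minimal sketch (not part of this commit) of the rule stated in the config comment above: the default LLM_MODEL must appear in AVAIL_LLM_MODELS. The variable names come from the diff; the check itself is only illustrative, assuming an abbreviated model list.

# Minimal sketch (illustrative, not part of this commit): verify that the
# default model chosen above is actually present in the list of available models.
LLM_MODEL = "gemini-1.5-pro-latest"   # default set by this commit
AVAIL_LLM_MODELS = [                  # excerpt of the full list in the diff above
    "gemini-1.5-pro-latest", "gemini-1.5-flash-latest", "gpt-3.5-turbo",
]

# The config comment requires LLM_MODEL to be included in AVAIL_LLM_MODELS;
# this check makes that requirement explicit at startup.
if LLM_MODEL not in AVAIL_LLM_MODELS:
    raise ValueError(f"LLM_MODEL={LLM_MODEL!r} must be included in AVAIL_LLM_MODELS")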