Update config_private.py
config_private.py  CHANGED  (+17 -17)
@@ -31,24 +31,24 @@ else:
 proxies = None
 
 # [step 3]>> Model selection (note: LLM_MODEL is the default model; it *must* be included in the AVAIL_LLM_MODELS list)
-LLM_MODEL = "gemini-1.5-pro-latest" # options ↓↓↓
+LLM_MODEL = "one-api-gemini-1.5-pro-latest" # options ↓↓↓
 AVAIL_LLM_MODELS = [
-    "@cf/deepseek-ai/deepseek-math-7b-base", "@cf/deepseek-ai/deepseek-math-7b-instruct", "@cf/defog/sqlcoder-7b-2", "@cf/google/gemma-2b-it-lora",
-    "@cf/google/gemma-7b-it-lora", "@cf/meta-llama/llama-2-7b-chat-hf-lora", "@cf/meta/llama-2-7b-chat-fp16", "@cf/meta/llama-2-7b-chat-int8",
-    "@cf/meta/llama-3-8b-instruct", "@cf/microsoft/phi-2", "@cf/mistral/mistral-7b-instruct-v0.1", "@cf/mistral/mistral-7b-instruct-v0.2-lora",
-    "@cf/openchat/openchat-3.5-0106", "@cf/qwen/qwen1.5-0.5b-chat", "@cf/qwen/qwen1.5-1.8b-chat", "@cf/qwen/qwen1.5-14b-chat-awq",
-    "@cf/qwen/qwen1.5-7b-chat-awq", "@cf/thebloke/discolm-german-7b-v1-awq", "@cf/tiiuae/falcon-7b-instruct",
-    "@cf/tinyllama/tinyllama-1.1b-chat-v1.0", "@hf/google/gemma-7b-it", "@hf/mistralai/mistral-7b-instruct-v0.2", "@hf/nexusflow/starling-lm-7b-beta",
-    "@hf/nousresearch/hermes-2-pro-mistral-7b", "@hf/thebloke/deepseek-coder-6.7b-base-awq", "@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
-    "@hf/thebloke/llama-2-13b-chat-awq", "@hf/thebloke/llamaguard-7b-awq", "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
-    "@hf/thebloke/neural-chat-7b-v3-1-awq", "@hf/thebloke/openhermes-2.5-mistral-7b-awq", "@hf/thebloke/zephyr-7b-beta-awq", "Atom-13B-Chat",
-    "Atom-1B-Chat", "Atom-7B-Chat", "Doubao-lite-128k", "Doubao-lite-32k", "Doubao-lite-4k", "Doubao-pro-128k", "Doubao-pro-32k",
-    "Doubao-pro-4k", "Llama3-Chinese-8B-Instruct", "Pro/meta-llama/Meta-Llama-3.1-8B-Instruct", "SparkDesk", "SparkDesk-v1.1",
-    "SparkDesk-v2.1", "SparkDesk-v3.1", "SparkDesk-v3.5", "SparkDesk-v4.0", "claude-3-haiku", "dall-e-3",
-    "deepseek-ai/DeepSeek-Coder-V2-Instruct", "deepseek-ai/DeepSeek-V2-Chat", "deepseek-ai/deepseek-llm-67b-chat", "gemini-1.0-pro-001",
-    "gemini-1.0-pro-latest", "gemini-1.0-pro-vision-001", "gemini-1.0-pro-vision-latest", "gemini-1.5-flash-latest", "gemini-1.5-pro-latest",
-    "gemini-ultra", "gpt-3.5-turbo", "llama-3-70b", "meta-llama/Meta-Llama-3.1-405B-Instruct", "meta-llama/Meta-Llama-3.1-70B-Instruct",
-    "mixtral-8x7b"
+    "one-api-@cf/deepseek-ai/deepseek-math-7b-base", "one-api-@cf/deepseek-ai/deepseek-math-7b-instruct", "one-api-@cf/defog/sqlcoder-7b-2", "one-api-@cf/google/gemma-2b-it-lora",
+    "one-api-@cf/google/gemma-7b-it-lora", "one-api-@cf/meta-llama/llama-2-7b-chat-hf-lora", "one-api-@cf/meta/llama-2-7b-chat-fp16", "one-api-@cf/meta/llama-2-7b-chat-int8",
+    "one-api-@cf/meta/llama-3-8b-instruct", "one-api-@cf/microsoft/phi-2", "one-api-@cf/mistral/mistral-7b-instruct-v0.1", "one-api-@cf/mistral/mistral-7b-instruct-v0.2-lora",
+    "one-api-@cf/openchat/openchat-3.5-0106", "one-api-@cf/qwen/qwen1.5-0.5b-chat", "one-api-@cf/qwen/qwen1.5-1.8b-chat", "one-api-@cf/qwen/qwen1.5-14b-chat-awq",
+    "one-api-@cf/qwen/qwen1.5-7b-chat-awq", "one-api-@cf/thebloke/discolm-german-7b-v1-awq", "one-api-@cf/tiiuae/falcon-7b-instruct",
+    "one-api-@cf/tinyllama/tinyllama-1.1b-chat-v1.0", "one-api-@hf/google/gemma-7b-it", "one-api-@hf/mistralai/mistral-7b-instruct-v0.2", "one-api-@hf/nexusflow/starling-lm-7b-beta",
+    "one-api-@hf/nousresearch/hermes-2-pro-mistral-7b", "one-api-@hf/thebloke/deepseek-coder-6.7b-base-awq", "one-api-@hf/thebloke/deepseek-coder-6.7b-instruct-awq",
+    "one-api-@hf/thebloke/llama-2-13b-chat-awq", "one-api-@hf/thebloke/llamaguard-7b-awq", "one-api-@hf/thebloke/mistral-7b-instruct-v0.1-awq",
+    "one-api-@hf/thebloke/neural-chat-7b-v3-1-awq", "one-api-@hf/thebloke/openhermes-2.5-mistral-7b-awq", "one-api-@hf/thebloke/zephyr-7b-beta-awq", "one-api-Atom-13B-Chat",
+    "one-api-Atom-1B-Chat", "one-api-Atom-7B-Chat", "one-api-Doubao-lite-128k", "one-api-Doubao-lite-32k", "one-api-Doubao-lite-4k", "one-api-Doubao-pro-128k", "one-api-Doubao-pro-32k",
+    "one-api-Doubao-pro-4k", "one-api-Llama3-Chinese-8B-Instruct", "one-api-Pro/meta-llama/Meta-Llama-3.1-8B-Instruct", "one-api-SparkDesk", "one-api-SparkDesk-v1.1",
+    "one-api-SparkDesk-v2.1", "one-api-SparkDesk-v3.1", "one-api-SparkDesk-v3.5", "one-api-SparkDesk-v4.0", "one-api-claude-3-haiku", "one-api-dall-e-3",
+    "one-api-deepseek-ai/DeepSeek-Coder-V2-Instruct", "one-api-deepseek-ai/DeepSeek-V2-Chat", "one-api-deepseek-ai/deepseek-llm-67b-chat", "one-api-gemini-1.0-pro-001",
+    "one-api-gemini-1.0-pro-latest", "one-api-gemini-1.0-pro-vision-001", "one-api-gemini-1.0-pro-vision-latest", "one-api-gemini-1.5-flash-latest", "one-api-gemini-1.5-pro-latest",
+    "one-api-gemini-ultra", "one-api-gpt-3.5-turbo", "one-api-llama-3-70b", "one-api-meta-llama/Meta-Llama-3.1-405B-Instruct", "one-api-meta-llama/Meta-Llama-3.1-70B-Instruct",
+    "one-api-mixtral-8x7b"
 ]
 # --- --- --- ---
 # P.S. Other available models also include
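As the config comment notes, LLM_MODEL must appear in AVAIL_LLM_MODELS, so a rename like the "one-api-" prefixing above has to be applied to both settings together. A minimal sanity-check sketch for use after editing config_private.py: the check_default_model helper below is hypothetical (not part of the repository) and it assumes config_private.py is importable from the current working directory.

    # Hypothetical post-edit check: confirm the default model is still listed.
    from config_private import LLM_MODEL, AVAIL_LLM_MODELS

    def check_default_model(default: str, available: list[str]) -> None:
        # Fail fast if the default model is missing from the available list.
        if default not in available:
            raise ValueError(
                f"LLM_MODEL {default!r} is not present in AVAIL_LLM_MODELS; "
                "add it to the list or choose a listed model."
            )

    if __name__ == "__main__":
        check_default_model(LLM_MODEL, AVAIL_LLM_MODELS)
        print(f"OK: {LLM_MODEL} is available ({len(AVAIL_LLM_MODELS)} models configured)")

Running this once after the edit catches the common mistake of prefixing the list entries but forgetting to update LLM_MODEL (or vice versa).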