Update llm.py
llm.py
```diff
@@ -20,7 +20,7 @@ import google.generativeai as genai # pip install -U -q google-generativeai
 llm_log_filename = f"{location__}/.cache/llm.log"
 
 
-genai.configure(api_key="
+genai.configure(api_key="")
 
 GEMINI_CLIENT = genai.GenerativeModel(GEMINI_MODEL, \
 generation_config = genai.GenerationConfig(
@@ -93,7 +93,7 @@ elif thinker in "70b|405b":
 CTXLEN = CTXLEN*1024 - MAX_OUTPUT_TOKENS
 
 from together import Together
-together_client = Together(api_key=
+together_client = Together(api_key=)
 ###
 stops = ["<|eot_id|>","<|eom_id|>","</answer>","</output>"]
 def thinker_chat(prompt, history=[], stream=False, use_cache=True, testing=False):
```
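The commit scrubs hardcoded API keys from `llm.py`, leaving `genai.configure(api_key="")` and the syntactically invalid `together_client = Together(api_key=)` behind. A minimal runnable sketch of the same setup, assuming the keys are instead supplied via environment variables (the variable names below are illustrative assumptions, not part of this commit):

```python
import os

import google.generativeai as genai
from together import Together

# Assumption: keys live in the environment rather than in source.
# GEMINI_API_KEY / TOGETHER_API_KEY are illustrative names.
genai.configure(api_key=os.environ["GEMINI_API_KEY"])
together_client = Together(api_key=os.environ["TOGETHER_API_KEY"])
```

If the `TOGETHER_API_KEY` environment variable is set, the `together` SDK can also pick it up on its own, so `Together()` with no `api_key` argument is an alternative to passing it explicitly.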