tiendung committed (verified)
Commit 538f422 · Parent: 89ab01a

Update llm.py

Files changed (1): llm.py (+2 -2)
llm.py CHANGED
@@ -20,7 +20,7 @@ import google.generativeai as genai # pip install -U -q google-generativeai
 llm_log_filename = f"{location__}/.cache/llm.log"
 
 
-genai.configure(api_key="AIzaSyAUeHVWLkYioIGk6PMbCTqk73PowHCIyPM")
+genai.configure(api_key="")
 
 GEMINI_CLIENT = genai.GenerativeModel(GEMINI_MODEL, \
     generation_config = genai.GenerationConfig(
@@ -93,7 +93,7 @@ elif thinker in "70b|405b":
 CTXLEN = CTXLEN*1024 - MAX_OUTPUT_TOKENS
 
 from together import Together
-together_client = Together(api_key='adc0db56b77fe6508bdeadb4d8253771750a50639f8e87313153e49d4599f6ea')
+together_client = Together(api_key=)
 ###
 stops = ["<|eot_id|>","<|eom_id|>","</answer>","</output>"]
 def thinker_chat(prompt, history=[], stream=False, use_cache=True, testing=False):
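
Note that the second replacement line, together_client = Together(api_key=), is a Python syntax error as committed: the keyword argument is left without a value, so llm.py will fail to import until a value is supplied. A minimal sketch of the usual way to keep keys like these out of source, assuming they are instead provided via environment variables (the names GEMINI_API_KEY and TOGETHER_API_KEY are illustrative, not from the original file):

import os

import google.generativeai as genai  # pip install -U -q google-generativeai
from together import Together        # pip install together

# Fail fast with a KeyError if the variable is unset, rather than
# silently configuring the client with an empty key.
genai.configure(api_key=os.environ["GEMINI_API_KEY"])

# The Together client can also read TOGETHER_API_KEY from the
# environment on its own when api_key is omitted entirely.
together_client = Together(api_key=os.environ["TOGETHER_API_KEY"])

Since both keys were previously committed in plain text, removing them from the tip of the branch does not scrub them from parent commit 89ab01a and earlier history; the leaked keys should also be revoked.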