complynx committed on
Commit
cb68ee9
·
1 Parent(s): 1110d46

change clients

Browse files
Files changed (2) hide show
  1. app.py +12 -6
  2. backend/query_llm.py +0 -3
app.py CHANGED
@@ -14,6 +14,13 @@ from backend.semantic_search import retrieve
14
  import itertools
15
  from gradio_client import Client
16
 
 
 
 
 
 
 
 
17
  client = Client("Be-Bo/llama-3-chatbot_70b")
18
 
19
  def run_llama(_, msg, *__):
@@ -22,13 +29,12 @@ def run_llama(_, msg, *__):
22
  api_name="/chat"
23
  )
24
 
25
- m7b01 = Client("https://zhuraavl-mistralai-mistral-7b-v0-1.hf.space/")
26
- def run_m7b01(_, msg, *__):
27
- yield m7b01.predict(
28
  msg,
29
  api_name="/predict"
30
  )
31
- inf_models = list(hf_models.keys()) + list(openai_models)
32
 
33
  emb_models = ["bge", "minilm"]
34
  splitters = ['ct', 'rct', 'nltk']
@@ -99,8 +105,8 @@ def bot(history, model_name, oepnai_api_key,
99
 
100
  if model_name == "llama 3":
101
  generate_fn = run_llama
102
- if model_name == "mistral-7B 0.1":
103
- generate_fn = run_m7b01
104
  elif model_name in hf_models:
105
  generate_fn = generate_hf
106
  elif model_name in openai_models:
 
14
  import itertools
15
  from gradio_client import Client
16
 
17
+
18
+ clients_eps = {
19
+ "wizardlm 13B": "https://itsmynti-ehartford-wizardlm-13b-uncensored.hf.space/",
20
+ "mistral 7b v0.1": "https://zhuraavl-mistralai-mistral-7b-v0-1.hf.space/",
21
+ }
22
+ clients = {k: Client(u) for k,u in clients_eps.items()}
23
+
24
  client = Client("Be-Bo/llama-3-chatbot_70b")
25
 
26
  def run_llama(_, msg, *__):
 
29
  api_name="/chat"
30
  )
31
 
32
+ def run_client(cli_name, msg, *__):
33
+ yield clients[cli_name].predict(
 
34
  msg,
35
  api_name="/predict"
36
  )
37
+ inf_models = list(hf_models.keys()) + list(openai_models) + list(clients_eps.keys())
38
 
39
  emb_models = ["bge", "minilm"]
40
  splitters = ['ct', 'rct', 'nltk']
 
105
 
106
  if model_name == "llama 3":
107
  generate_fn = run_llama
108
+ if model_name in clients_eps:
109
+ generate_fn = run_client
110
  elif model_name in hf_models:
111
  generate_fn = generate_hf
112
  elif model_name in openai_models:
backend/query_llm.py CHANGED
@@ -12,9 +12,6 @@ HF_TOKEN = os.getenv("HF_TOKEN")
12
 
13
  hf_models = {
14
  "mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
15
- "mistral-7B 0.1": "mistralai/Mistral-7B-v0.1",
16
- # "vicuna-13b":"lmsys/vicuna-13b-v1.5",
17
- # "WizardLM-30B": "cognitivecomputations/WizardLM-30B-Uncensored",
18
  "llama 3": "meta-llama/Meta-Llama-3-70B-Instruct",
19
  }
20
  openai_models = {"gpt-4o","gpt-3.5-turbo-0125"}
 
12
 
13
  hf_models = {
14
  "mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
 
 
 
15
  "llama 3": "meta-llama/Meta-Llama-3-70B-Instruct",
16
  }
17
  openai_models = {"gpt-4o","gpt-3.5-turbo-0125"}