complynx committed on
Commit
d3fc948
·
1 Parent(s): a029f92

run llama in the other space

Browse files
Files changed (1) hide show
  1. app.py +11 -2
app.py CHANGED
@@ -12,7 +12,15 @@ from jinja2 import Environment, FileSystemLoader
12
  from backend.query_llm import generate_hf, generate_openai, hf_models, openai_models
13
  from backend.semantic_search import retrieve
14
  import itertools
 
15
 
 
 
 
 
 
 
 
16
  inf_models = list(hf_models.keys()) + list(openai_models)
17
 
18
  emb_models = ["bge", "minilm"]
@@ -82,8 +90,9 @@ def bot(history, model_name, oepnai_api_key,
82
  prompt = template.render(documents=documents, query=query)
83
  prompt_html = template_html.render(documents=documents, query=query)
84
 
85
-
86
- if model_name in hf_models:
 
87
  generate_fn = generate_hf
88
  elif model_name in openai_models:
89
  generate_fn = generate_openai
 
12
  from backend.query_llm import generate_hf, generate_openai, hf_models, openai_models
13
  from backend.semantic_search import retrieve
14
  import itertools
15
+ from gradio_client import Client
16
 
17
+ client = Client("Be-Bo/llama-3-chatbot_70b")
18
+
19
+ def run_llama(_, msg, *__):
20
+ return client.predict(
21
+ message=msg,
22
+ api_name="/chat"
23
+ )
24
  inf_models = list(hf_models.keys()) + list(openai_models)
25
 
26
  emb_models = ["bge", "minilm"]
 
90
  prompt = template.render(documents=documents, query=query)
91
  prompt_html = template_html.render(documents=documents, query=query)
92
 
93
+ if model_name == "llama 3":
94
+ generate_fn = run_llama
95
+ elif model_name in hf_models:
96
  generate_fn = generate_hf
97
  elif model_name in openai_models:
98
  generate_fn = generate_openai