Update appStore/rag.py
appStore/rag.py +13 -11
appStore/rag.py CHANGED
@@ -193,21 +193,23 @@ def run_query(context, label):
     '''
     chatbot_role = """You are an analyst specializing in climate change impact assessments and producing insights from policy documents."""
 
+    messages = [{"role": "system", "content": chatbot_role},{"role": "user", "content": get_prompt(context, label)}]
+
     # res = openai.ChatCompletion.create(model=model_select, messages=[{"role": "user", "content": get_prompt(docs)}])
     # result = res.choices[0].message.content
 
     # Initialize the client, pointing it to one of the available models
-    client = InferenceClient()
-
-    response = client.chat.completions.create(
-        model="meta-llama/Meta-Llama-3.1-8B-Instruct",
-        messages=[
-            ChatMessage(role="system", content=chatbot_role),
-            ChatMessage(role="user", content=get_prompt(context, label)),
-        ],
-        stream=True,
-        max_tokens=500
-    )
+    client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token = hf_token)
+
+    # response = client.chat.completions.create(
+    #     model="meta-llama/Meta-Llama-3.1-8B-Instruct",
+    #     messages=[
+    #         ChatMessage(role="system", content=chatbot_role),
+    #         ChatMessage(role="user", content=get_prompt(context, label)),
+    #     ],
+    #     stream=True,
+    #     max_tokens=500
+    # )
 
     # iterate and print stream
     # for message in chat_completion:
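For context, below is a minimal sketch of how the pieces this commit introduces (the messages list and the InferenceClient bound to meta-llama/Meta-Llama-3-8B-Instruct) could be wired into the streaming call that the diff leaves commented out. It uses huggingface_hub's InferenceClient.chat_completion; hf_token and get_prompt are assumed to be defined elsewhere in appStore/rag.py, so the sketch takes them as parameters to stay self-contained.

# Sketch only, not part of the commit: a streaming chat completion with
# huggingface_hub. `hf_token` and `get_prompt` are assumptions taken from
# the surrounding file and injected here as arguments.
from huggingface_hub import InferenceClient

def run_query_sketch(context, label, hf_token, get_prompt):
    chatbot_role = """You are an analyst specializing in climate change impact assessments and producing insights from policy documents."""
    messages = [{"role": "system", "content": chatbot_role},
                {"role": "user", "content": get_prompt(context, label)}]

    client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=hf_token)

    # With stream=True, chat_completion yields chunks whose delta carries the
    # next piece of generated text; concatenate the deltas into the answer.
    answer = ""
    for chunk in client.chat_completion(messages=messages, stream=True, max_tokens=500):
        answer += chunk.choices[0].delta.content or ""
    return answer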