Commit 0031a0c
Parent(s): 7a4a88c
init

Files changed:
- .gitignore +7 -0
- Dockerfile +13 -0
- bot/__init__.py +6 -0
- bot/llm_client.py +35 -0
- bot/panda_bot.py +203 -0
- requirements.txt +136 -0
- server.py +66 -0
- tools/__init__.py +35 -0
- tools/tools_llm.py +113 -0
.gitignore
ADDED
@@ -0,0 +1,7 @@
+__pycache__/
+*.pyc
+
+env/
+
+final_test/
+models/
Dockerfile
ADDED
@@ -0,0 +1,13 @@
+FROM python:3.11
+
+RUN useradd -m -u 1000 user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
+
+WORKDIR /server
+
+COPY --chown=user ./requirements.txt requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+COPY --chown=user . /server
+CMD ["gunicorn", "server:app", "--bind", "0.0.0.0:7860"]
bot/__init__.py
ADDED
@@ -0,0 +1,6 @@
+from bot.panda_bot import PandaBot
+from langchain.memory import ConversationBufferMemory
+
+memory = ConversationBufferMemory(memory_key="chat_history")
+
+llm = PandaBot(memory=memory)
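
Note that bot/__init__.py wires a single ConversationBufferMemory into one module-level PandaBot, so every request shares one conversation history. A minimal sketch of how that singleton would be used (the message and the username "Budi" are hypothetical; it assumes GROQ_API_KEY and SERPER_API_KEY are set in the environment):

```python
from bot import llm

print(llm.query("Good morning, Panda", username="Budi"))
# The shared memory accumulates every turn across all requests:
print(llm.memory.load_memory_variables({})["chat_history"])
```
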
bot/llm_client.py
ADDED
@@ -0,0 +1,35 @@
+from langchain.llms.base import LLM
+from typing import Optional, List, Mapping, Any
+from groq import Groq
+import os
+
+# The API key is read from the environment; secrets must never be committed.
+client = Groq(
+    api_key=os.environ.get("GROQ_API_KEY"),
+)
+
+
+class Mistral(LLM):
+    # NOTE: despite the class name, this wrapper calls Llama 3 70B on Groq.
+    @property
+    def _llm_type(self) -> str:
+        return "custom"
+
+    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+        if isinstance(stop, list):
+            stop = stop + ["\n###", "\nObservation:"]
+
+        response = client.chat.completions.create(
+            stop=stop,
+            temperature=0.0,
+            max_completion_tokens=256,
+            messages=[{'role': 'user', 'content': prompt}],
+            model='llama3-70b-8192',
+        )
+
+        return response.choices[0].message.content
+
+    @property
+    def _identifying_params(self) -> Mapping[str, Any]:
+        """Get the identifying parameters."""
+        return {}
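
A quick smoke test of the wrapper above might look like this (a minimal sketch, assuming GROQ_API_KEY is exported; LangChain's base LLM class exposes invoke() and forwards stop sequences to _call):

```python
from bot.llm_client import Mistral

llm = Mistral()
# One-shot completion against the Groq backend, outside the agent loop.
reply = llm.invoke("In one sentence, what does hemodialysis do?")
print(reply)
```
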
bot/panda_bot.py
ADDED
@@ -0,0 +1,203 @@
+from tools.tools_llm import *
+
+from langchain.agents import AgentExecutor, LLMSingleActionAgent, AgentOutputParser
+from langchain.prompts import StringPromptTemplate
+from langchain.chains import LLMChain
+from typing import List, Union, Any, Optional
+from langchain.schema import AgentAction, AgentFinish
+import re
+
+from bot.llm_client import Mistral
+from langchain.tools import StructuredTool
+from langchain.memory import ConversationBufferMemory
+
+import os
+
+template = """You are **Panda**, a chatbot designed to assist **hemodialysis patients** by providing support and information related to their treatment and health.
+
+Panda's Role: Panda's main goal is to offer assistance with a variety of tasks, including answering questions and providing in-depth explanations about hemodialysis, kidney failure, and related health topics. Panda can engage in natural-sounding conversations by generating human-like text responses that are coherent, relevant, and compassionate.
+
+Panda is a learning AI and continually improves its responses by processing and understanding large amounts of information. Panda’s objective is to ensure that hemodialysis patients feel supported, informed, and cared for by delivering accurate, helpful, and timely information.
+
+Patient Care Focus: As an assistant, Panda should regularly check in on the patient's well-being by asking if everything is all right and if they have any questions or concerns regarding their treatment. Panda must also provide proactive support services, such as reminders, symptom management, or information sharing.
+
+Patient Information:
+
+- Patient Name: '{username}'
+- Hemodialysis Schedule: Tuesday and Friday at 08:00 AM
+
+Panda should always remember this schedule and provide reminders or updates when relevant.
+
+---
+
+Available Tools:
+
+Panda can access the following tools to assist the patient:
+
+{tools}
+
+---
+
+To use a tool, please use the following format:
+
+```
+
+Thought: Do I need to use a tool? Yes
+Action: the action to take, should be one of [{tool_names}]
+Action Input: the input to the action
+Observation: the result of the action
+
+```
+
+When you have a response to say to the Patient, or if you do not need to use a tool, you MUST use the format:
+
+```
+
+Thought: Do I need to use a tool? No
+Final Answer: [Your response to the Patient]
+
+```
+---
+
+Guidelines for Tool Usage:
+
+- If the patient mentions symptoms or health concerns that may need immediate attention, use the 'send_emergency_message_to_medic' tool to alert the healthcare team.
+
+- If the patient asks for medical advice or information related to hemodialysis or their condition, use the 'search_information_for_question' tool to find and provide relevant information.
+
+---
+
+Goal: Your goal is to ensure the patient feels well-informed, safe, and supported. Whether they need medical information, help with managing symptoms, or even just reassurance, Panda is here to help.
+
+---
+
+Begin!
+
+Previous conversation history:
+
+{chat_history}
+
+New input: {input}
+
+{agent_scratchpad}
+"""
+
+class CustomPromptTemplate(StringPromptTemplate):
+    """Schema to represent a prompt for an LLM.
+
+    Example:
+        .. code-block:: python
+
+            from langchain import PromptTemplate
+            prompt = PromptTemplate(input_variables=["foo"], template="Say {foo}")
+    """
+
+    input_variables: List[str]
+    """A list of the names of the variables the prompt template expects."""
+
+    template: str
+    """The prompt template."""
+
+    template_format: str = "f-string"
+    """The format of the prompt template. Options are: 'f-string', 'jinja2'."""
+
+    validate_template: bool = False
+    """Whether or not to try validating the template."""
+
+    tools_getter: List[StructuredTool]
+    username: str
+
+    def format(self, **kwargs) -> str:
+        # Get the intermediate steps (AgentAction, Observation tuples)
+        # and format them into the agent scratchpad
+        intermediate_steps = kwargs.pop("intermediate_steps")
+        thoughts = ""
+        for action, observation in intermediate_steps:
+            thoughts += action.log
+            thoughts += f"\nObservation: {observation}"
+        # Set the agent_scratchpad variable to that value
+        kwargs["agent_scratchpad"] = thoughts
+
+        # Create a tools variable from the list of tools provided
+        kwargs["tools"] = "\n".join(
+            [f"{tool.name}: {tool.description}" for tool in self.tools_getter]
+        )
+        # Create a list of tool names for the tools provided
+        kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools_getter])
+        kwargs["username"] = self.username
+
+        return self.template.format(**kwargs)
+
+class CustomOutputParser(AgentOutputParser):
+
+    def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
+        # Check if the agent should finish
+        if "Final Answer:" in llm_output:
+            return AgentFinish(
+                # Return values is generally always a dictionary with a single `output` key
+                # It is not recommended to try anything else at the moment :)
+                return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
+                log=llm_output,
+            )
+        elif "Answer:" in llm_output:
+            return AgentFinish(
+                # Fallback: some completions emit "Answer:" without the "Final" prefix
+                # but should still be treated as a final response
+                return_values={"output": llm_output.split("Answer:")[-1].strip()},
+                log=llm_output,
+            )
+
+        # Parse out the action and action input
+        regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
+
+        match = re.search(regex, llm_output, re.DOTALL)
+
+        if not match:
+            raise ValueError(f"Could not parse LLM output: `{llm_output}`")
+
+        action = match.group(1).strip()  # e.g. "send_emergency_message_to_medic"
+        action_input = match.group(2)  # e.g. "What is the weather today?"
+        # Return the action and action input
+        return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
+
+class PandaBot:
+    def __init__(self, memory=None):
+        # Initialize memory if provided, otherwise create a new one
+        self.memory = memory if memory else ConversationBufferMemory(memory_key="chat_history")
+        # Initialize the LLM, output parser, and tool list
+        self.llm = Mistral()
+        self.tools_list = [send_emergency_message_to_medic, search_information_for_question]
+        self.tool_names = [i.name for i in self.tools_list]
+        self.output_parser = CustomOutputParser()
+        # SERPER_API_KEY and LANGCHAIN_API_KEY are expected in the environment
+        # (e.g. Space secrets); credentials must never be hardcoded in source.
+        os.environ.setdefault("LANGCHAIN_TRACING_V2", "true")
+        os.environ.setdefault("LANGCHAIN_ENDPOINT", "https://api.smith.langchain.com")
+        os.environ.setdefault("LANGCHAIN_PROJECT", "LLM Patient Monitoring")
+
+
+    def query(self, input_text: str, username: str) -> str:
+        prompt = CustomPromptTemplate(
+            input_variables=["input", "intermediate_steps", "chat_history"],
+            template=template,
+            validate_template=False,
+            tools_getter=self.tools_list,
+            username=username)
+
+        self.llm_chains = LLMChain(llm=self.llm, prompt=prompt)
+
+        self.agent = LLMSingleActionAgent(
+            llm_chain=self.llm_chains,
+            output_parser=self.output_parser,
+            stop=["\nObservation:"],
+            allowed_tools=self.tool_names,
+        )
+        self.agent_executor = AgentExecutor.from_agent_and_tools(agent=self.agent,
+                                                                 tools=self.tools_list,
+                                                                 verbose=True,
+                                                                 memory=self.memory)
+
+        return self.agent_executor.run(input_text)
+
+    def show_memory(self):
+        return self.memory.load_memory_variables({})  # ConversationBufferMemory has no get_memory()
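
To make the contract between the prompt format and CustomOutputParser concrete, here is roughly what the parser does with a tool-using turn and a final turn (the model text is illustrative, not captured output):

```python
from bot.panda_bot import CustomOutputParser

parser = CustomOutputParser()

# A tool-using turn: parsed into an AgentAction routed to the named tool.
tool_turn = (
    "Thought: Do I need to use a tool? Yes\n"
    "Action: search_information_for_question\n"
    "Action Input: safe fluid intake between hemodialysis sessions"
)
action = parser.parse(tool_turn)
print(action.tool, "->", action.tool_input)

# A final turn: parsed into an AgentFinish whose output goes to the patient.
final_turn = "Thought: Do I need to use a tool? No\nFinal Answer: Glad I could help!"
finish = parser.parse(final_turn)
print(finish.return_values["output"])
```
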
requirements.txt
ADDED
@@ -0,0 +1,136 @@
+aiohappyeyeballs==2.4.6
+aiohttp==3.11.12
+aiosignal==1.3.2
+annotated-types==0.7.0
+anyio==4.8.0
+asgiref==3.8.1
+attrs==25.1.0
+backoff==2.2.1
+bcrypt==4.2.1
+beautifulsoup4==4.13.3
+blinker==1.9.0
+build==1.2.2.post1
+cachetools==5.5.2
+certifi==2025.1.31
+charset-normalizer==3.4.1
+chroma-hnswlib==0.7.6
+chromadb==0.6.3
+click==8.1.8
+colorama==0.4.6
+coloredlogs==15.0.1
+dataclasses-json==0.6.7
+deep-translator==1.11.4
+Deprecated==1.2.18
+distro==1.9.0
+durationpy==0.9
+fastapi==0.115.8
+filelock==3.17.0
+Flask==3.1.0
+flatbuffers==25.2.10
+frozenlist==1.5.0
+fsspec==2025.2.0
+google-auth==2.38.0
+googleapis-common-protos==1.68.0
+greenlet==3.1.1
+groq==0.18.0
+grpcio==1.70.0
+gunicorn==23.0.0
+h11==0.14.0
+httpcore==1.0.7
+httptools==0.6.4
+httpx==0.28.1
+httpx-sse==0.4.0
+huggingface-hub==0.29.1
+humanfriendly==10.0
+idna==3.10
+importlib_metadata==8.5.0
+importlib_resources==6.5.2
+itsdangerous==2.2.0
+Jinja2==3.1.5
+joblib==1.4.2
+jsonpatch==1.33
+jsonpointer==3.0.0
+kubernetes==32.0.1
+langchain==0.3.19
+langchain-community==0.3.18
+langchain-core==0.3.37
+langchain-text-splitters==0.3.6
+langsmith==0.3.10
+markdown-it-py==3.0.0
+MarkupSafe==3.0.2
+marshmallow==3.26.1
+mdurl==0.1.2
+mmh3==5.1.0
+monotonic==1.6
+mpmath==1.3.0
+multidict==6.1.0
+mypy-extensions==1.0.0
+networkx==3.4.2
+numpy==1.26.4
+oauthlib==3.2.2
+onnxruntime==1.20.1
+opentelemetry-api==1.30.0
+opentelemetry-exporter-otlp-proto-common==1.30.0
+opentelemetry-exporter-otlp-proto-grpc==1.30.0
+opentelemetry-instrumentation==0.51b0
+opentelemetry-instrumentation-asgi==0.51b0
+opentelemetry-instrumentation-fastapi==0.51b0
+opentelemetry-proto==1.30.0
+opentelemetry-sdk==1.30.0
+opentelemetry-semantic-conventions==0.51b0
+opentelemetry-util-http==0.51b0
+orjson==3.10.15
+overrides==7.7.0
+packaging==23.2
+pillow==11.1.0
+posthog==3.15.0
+propcache==0.3.0
+protobuf==5.29.3
+pyasn1==0.6.1
+pyasn1_modules==0.4.1
+pydantic==2.10.6
+pydantic-settings==2.8.0
+pydantic_core==2.27.2
+Pygments==2.19.1
+PyPika==0.48.9
+pyproject_hooks==1.2.0
+pyreadline3==3.5.4
+python-dateutil==2.9.0.post0
+python-dotenv==1.0.1
+PyYAML==6.0.2
+regex==2024.11.6
+requests==2.32.3
+requests-oauthlib==2.0.0
+requests-toolbelt==1.0.0
+rich==13.9.4
+rsa==4.9
+safetensors==0.5.2
+scikit-learn==1.6.1
+scipy==1.15.2
+sentence-transformers==3.4.1
+shellingham==1.5.4
+six==1.17.0
+sniffio==1.3.1
+soupsieve==2.6
+SQLAlchemy==2.0.38
+starlette==0.45.3
+sympy==1.13.1
+tenacity==8.5.0
+threadpoolctl==3.5.0
+tokenizers==0.21.0
+torch==2.6.0
+tqdm==4.67.1
+transformers==4.49.0
+typer==0.15.1
+typing-inspect==0.9.0
+typing_extensions==4.12.2
+urllib3==2.3.0
+uvicorn==0.34.0
+watchfiles==1.0.4
+websocket-client==1.8.0
+websockets==15.0
+Werkzeug==3.1.3
+wrapt==1.17.2
+yarl==1.18.3
+zipp==3.21.0
+zstandard==0.23.0
server.py
ADDED
@@ -0,0 +1,66 @@
+from bot import llm
+# from tools import model_llm_rag
+from flask import Flask, request, jsonify
+# from langchain_core.messages import HumanMessage, AIMessage
+# from langchain.prompts import ChatPromptTemplate
+# from deep_translator import GoogleTranslator
+
+
+app = Flask(__name__)
+
+PROMPT_TEMPLATE = """
+You are Panda, a hemodialysis chatbot that assists patients with their treatment. You are assisting a patient named {username}. Greet them appropriately based on the current time of day (good morning, good afternoon, or good evening).
+
+Since this is the first conversation of the day, you must remind the patient about their hemodialysis schedule by identifying the next upcoming session based on the current date and time. Ensure you use the current date and time to provide the correct information, whether the next session is today or later in the week.
+
+Ask the patient if they have any questions or concerns about their treatment.
+
+---
+Current day: Monday
+
+Current date: 2024/05/13
+
+Current time: 07:00:00 AM
+
+Hemodialysis Schedules: Tuesday and Friday at 08:00 AM
+
+Patient's Name: {username}
+---
+Output:
+
+"""
+
+# @app.route('/first_message', methods=['POST'])
+# def first_message():
+#     data = request.json
+#     username = data.get('username')
+#     current_date_time = data.get('current_date_time')
+#     prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
+#     prompt = prompt_template.format(username=username)
+#     response = model_llm_rag.invoke(prompt)
+#     translate = GoogleTranslator(source='en', target='id').translate(response)
+#     llm.memory.chat_memory.add_ai_message(AIMessage(response))
+#     return jsonify({"response": translate})
+
+def simulate_llm_query(user_input, username):
+    """
+    Queries the PandaBot agent with the user's input.
+    Kept as a thin wrapper so the LLM backend can be swapped out easily.
+    """
+    # Delegates to the shared agent initialized in bot/__init__.py
+    return llm.query(user_input, username)
+
+@app.route('/query', methods=['POST'])
+def query_llm():
+    data = request.json
+    user_input = data.get('input')
+    username = data.get('username')
+
+    if not user_input:
+        return jsonify({"error": "No input provided"}), 400
+
+    response = simulate_llm_query(user_input, username)
+    return jsonify({"response": response})
+
+if __name__ == '__main__':
+    app.run(host='0.0.0.0', port=6541)
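
A client calls the /query endpoint with a JSON body carrying input and username. A sketch against a local run (port 6541 as in __main__; inside the container the port depends on the CMD, and the patient message here is hypothetical):

```python
import requests

resp = requests.post(
    "http://127.0.0.1:6541/query",
    json={"input": "I feel dizzy after my last session", "username": "Budi"},
)
print(resp.json()["response"])
```
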
tools/__init__.py
ADDED
@@ -0,0 +1,35 @@
+# import os
+# from langchain.embeddings import HuggingFaceEmbeddings
+# from langchain.vectorstores.chroma import Chroma
+# from langchain_community.cross_encoders import HuggingFaceCrossEncoder
+# import time
+# from langchain.retrievers import ContextualCompressionRetriever
+# from langchain.retrievers.document_compressors import CrossEncoderReranker
+# from bot.llm_client import Mistral
+
+
+# os.environ["SERPER_API_KEY"] = 'TOKEN'
+# CHROMA_PATH = "final_test/chroma_test"
+# CHROMA_PATH = "final_test/chroma_test2"
+
+
+# def load_embedding_model(model_path: str):
+#     start_time = time.time()
+#     encode_kwargs = {"normalize_embeddings": True}
+#     local_embedding = HuggingFaceEmbeddings(
+#         model_name=model_path,
+#         cache_folder="./models",
+#         encode_kwargs=encode_kwargs
+#     )
+#     end_time = time.time()
+#     print(f'model load time {round(end_time - start_time, 0)} second')
+#     return local_embedding
+
+# embedding = load_embedding_model(model_path="intfloat/multilingual-e5-large")
+
+# reranker_model = HuggingFaceCrossEncoder(model_name="BAAI/bge-reranker-v2-m3")
+
+# db = Chroma(persist_directory=CHROMA_PATH, embedding_function=embedding).as_retriever(search_kwargs={"k": 20})
+
+# model_llm_rag = Mistral()
+
tools/tools_llm.py
ADDED
@@ -0,0 +1,113 @@
+from langchain.tools import tool
+from langchain_community.utilities import GoogleSerperAPIWrapper
+import requests
+import random
+import json
+# from tools import db, reranker_model, model_llm_rag
+# from langchain.retrievers import ContextualCompressionRetriever
+# from langchain.retrievers.document_compressors import CrossEncoderReranker
+# from deep_translator import GoogleTranslator
+# from langchain.prompts import ChatPromptTemplate
+
+PROMPT_TEMPLATE = """### Instruction:
+Your job is to answer the question based on the given pieces of information. All you have to do is answer the question. Not all of the information provided may be relevant to the question. The answer you create must be logical. Each piece of information will be separated by '---'.
+
+### Example:
+Question: What are the benefits of regular exercise for cardiovascular health?
+---
+
+Research published in the Journal of the American Heart Association indicates that regular exercise can reduce the risk of coronary heart disease by up to 30%. Physical activity helps strengthen heart muscles, improve blood circulation, and lower blood pressure.
+
+---
+
+Although exercise has many benefits, it is important to do it correctly to avoid injuries. Warming up before exercising and cooling down afterwards are highly recommended. Additionally, the type of exercise chosen should match the individual's physical condition to avoid unwanted risks.
+
+---
+
+According to a study from the Mayo Clinic, people who exercise regularly have better cholesterol levels and tend to have a healthier weight. Exercise can also increase insulin sensitivity and help regulate blood sugar levels, which are important factors in maintaining heart health.
+
+---
+
+Answer:
+Regular physical exercise has several benefits for cardiovascular health. Firstly, it can reduce the risk of coronary heart disease by up to 30%, as it strengthens the heart muscles, improves blood circulation, and lowers blood pressure. Secondly, individuals who exercise regularly tend to have better cholesterol levels and a healthier weight, which are crucial for heart health. Additionally, regular exercise can increase insulin sensitivity and help regulate blood sugar levels, further contributing to cardiovascular well-being.
+
+### Another example:
+Question: What are the benefits of a fiber-rich diet for digestive health?
+---
+
+A fiber-rich diet is known to prevent constipation by increasing stool bulk and softness, making it easier to pass. Fiber also helps maintain gut health by promoting the growth of beneficial bacteria in the digestive system.
+
+---
+
+High-fiber foods such as fruits, vegetables, and whole grains are not only good for digestion but can also help control blood sugar levels and lower cholesterol. Soluble fiber in these foods helps slow down sugar absorption and binds cholesterol in the intestines.
+
+---
+
+Some studies suggest that a high-fiber diet can reduce the risk of colorectal cancer. Fiber helps speed up the elimination of carcinogenic substances from the colon, reducing the exposure time of colon cells to harmful materials.
+
+---
+
+Answer:
+A diet rich in fiber has multiple benefits for digestive health. It can prevent constipation by increasing stool bulk and softness, making it easier to pass. Fiber also promotes gut health by encouraging the growth of beneficial bacteria in the digestive system. Additionally, high-fiber foods such as fruits, vegetables, and whole grains can help control blood sugar levels and lower cholesterol. Soluble fiber in these foods slows sugar absorption and binds cholesterol in the intestines. Furthermore, a high-fiber diet can reduce the risk of colorectal cancer by speeding up the removal of carcinogenic substances from the colon, thereby reducing the exposure time of colon cells to harmful materials.
+
+### Input
+Question: {question}
+---
+
+{context}
+
+---
+
+Answer:
+"""
+
+@tool
+def send_emergency_message_to_medic(query: str) -> str:
+    """This function is used to send a message containing user symptoms to the medic when the symptoms are related to emergency cases.
+    The query must be semantically the same as the user input.
+    You can ONLY run this function ONE time; afterwards you must run the 'search_information_for_question' tool to get an explanation of the user's symptoms."""
+    url = "http://127.0.0.1:8000/message/get_message/"
+
+    user_id = str(random.randint(1, 100))
+
+    data = {
+        "message": query,
+        "user_id": user_id
+    }
+
+    # Serialize the payload to JSON for the request body
+    data = json.dumps(data)
+
+    response = requests.post(url, data=data)
+    return 'Success sending message. Please provide a search query for the symptoms that the patient has.\n'
+
+@tool
+def search_information_for_question(query: str) -> str:
+    """Function that searches for information based on the user query. You must use this function if there are questions related to medical topics.
+    The query is the message that the patient sends to Panda; YOU MUST NOT CHANGE IT."""
+    # compressor = CrossEncoderReranker(model=reranker_model, top_n=3)
+    # compression_retriever = ContextualCompressionRetriever(
+    #     base_compressor=compressor, base_retriever=db
+    # )
+
+    # query_translate = GoogleTranslator(source='english', target='id').translate(query)
+    # results = compression_retriever.invoke(query)
+    # target = "\n\n---\n\n".join([doc.page_content for doc in results])
+    # context_text = GoogleTranslator(source='id', target='english').translate(target)
+    # prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
+    # prompt = prompt_template.format(context=context_text, question=query_translate)
+    # print(target)
+    # result = model_llm_rag.invoke(prompt)
+    # return GoogleTranslator(source='id', target='english').translate(result)
+    return GoogleSerperAPIWrapper().run(query)
+
+# @tool
+# def search_medic_info(query: str) -> str:
+#     results = db.similarity_search_with_relevance_scores(query, k=3)
+#     context_text = "\n\n---\n\n".join([doc.page_content for doc, _score in results])
+#     return context_text
+
+# @tool
+# def search_medic_info(query: str) -> str:
+#     """Function that searches for medical information based on the user query. The query is the message that the patient send to Panda, YOU MUST NOT CHANGE IT."""
+#     return GoogleSerperAPIWrapper().run(query)
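
Since @tool wraps each function in a LangChain tool object, the name and description the agent sees, and the underlying call, can be checked directly (a sketch; the live call assumes SERPER_API_KEY is set in the environment):

```python
from tools.tools_llm import search_information_for_question

# The agent selects tools by these attributes, rendered into {tools} in the prompt.
print(search_information_for_question.name)
print(search_information_for_question.description)

# .run() executes the wrapped function (requires SERPER_API_KEY).
print(search_information_for_question.run("what is hemodialysis"))
```
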