hari-huynh commited on
Commit
d63c9ee
1 Parent(s): 2ec542e

Update Chainlit Huggingface Space

Browse files
.chainlit/config.toml ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ # Whether to enable telemetry (default: true). No personal data is collected.
3
+ enable_telemetry = true
4
+
5
+
6
+ # List of environment variables to be provided by each user to use the app.
7
+ user_env = []
8
+
9
+ # Duration (in seconds) during which the session is saved when the connection is lost
10
+ session_timeout = 3600
11
+
12
+ # Enable third parties caching (e.g LangChain cache)
13
+ cache = false
14
+
15
+ # Authorized origins
16
+ allow_origins = ["*"]
17
+
18
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
19
+ # follow_symlink = false
20
+
21
+ [features]
22
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
23
+ unsafe_allow_html = false
24
+
25
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
26
+ latex = false
27
+
28
+ # Automatically tag threads with the current chat profile (if a chat profile is used)
29
+ auto_tag_thread = true
30
+
31
+ # Authorize users to spontaneously upload files with messages
32
+ [features.spontaneous_file_upload]
33
+ enabled = true
34
+ accept = ["*/*"]
35
+ max_files = 20
36
+ max_size_mb = 500
37
+
38
+ [features.audio]
39
+ # Threshold for audio recording
40
+ min_decibels = -45
41
+ # Delay for the user to start speaking in MS
42
+ initial_silence_timeout = 3000
43
+ # Delay for the user to continue speaking in MS. If the user stops speaking for this duration, the recording will stop.
44
+ silence_timeout = 1500
45
+ # Above this duration (MS), the recording will forcefully stop.
46
+ max_duration = 15000
47
+ # Duration of the audio chunks in MS
48
+ chunk_duration = 1000
49
+ # Sample rate of the audio
50
+ sample_rate = 44100
51
+
52
+ [UI]
53
+ # Name of the assistant.
54
+ name = "Assistant"
55
+
56
+ # Description of the assistant. This is used for HTML tags.
57
+ # description = ""
58
+
59
+ # Large size content are by default collapsed for a cleaner ui
60
+ default_collapse_content = true
61
+
62
+ # Hide the chain of thought details from the user in the UI.
63
+ hide_cot = false
64
+
65
+ # Link to your github repo. This will add a github button in the UI's header.
66
+ # github = ""
67
+
68
+ # Specify a CSS file that can be used to customize the user interface.
69
+ # The CSS file can be served from the public directory or via an external link.
70
+ # custom_css = "/public/test.css"
71
+
72
+ # Specify a Javascript file that can be used to customize the user interface.
73
+ # The Javascript file can be served from the public directory.
74
+ # custom_js = "/public/test.js"
75
+
76
+ # Specify a custom font url.
77
+ # custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap"
78
+
79
+ # Specify a custom meta image url.
80
+ # custom_meta_image_url = "https://chainlit-cloud.s3.eu-west-3.amazonaws.com/logo/chainlit_banner.png"
81
+
82
+ # Specify a custom build directory for the frontend.
83
+ # This can be used to customize the frontend code.
84
+ # Be careful: If this is a relative path, it should not start with a slash.
85
+ # custom_build = "./public/build"
86
+
87
+ [UI.theme]
88
+ default = "dark"
89
+ #layout = "wide"
90
+ #font_family = "Inter, sans-serif"
91
+ # Override default MUI light theme. (Check theme.ts)
92
+ [UI.theme.light]
93
+ #background = "#FAFAFA"
94
+ #paper = "#FFFFFF"
95
+
96
+ [UI.theme.light.primary]
97
+ #main = "#F80061"
98
+ #dark = "#980039"
99
+ #light = "#FFE7EB"
100
+ [UI.theme.light.text]
101
+ #primary = "#212121"
102
+ #secondary = "#616161"
103
+
104
+ # Override default MUI dark theme. (Check theme.ts)
105
+ [UI.theme.dark]
106
+ #background = "#FAFAFA"
107
+ #paper = "#FFFFFF"
108
+
109
+ [UI.theme.dark.primary]
110
+ #main = "#F80061"
111
+ #dark = "#980039"
112
+ #light = "#FFE7EB"
113
+ [UI.theme.dark.text]
114
+ #primary = "#EEEEEE"
115
+ #secondary = "#BDBDBD"
116
+
117
+ [meta]
118
+ generated_by = "1.1.304"
.chainlit/translations/en-US.json ADDED
@@ -0,0 +1,229 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "components": {
3
+ "atoms": {
4
+ "buttons": {
5
+ "userButton": {
6
+ "menu": {
7
+ "settings": "Settings",
8
+ "settingsKey": "S",
9
+ "APIKeys": "API Keys",
10
+ "logout": "Logout"
11
+ }
12
+ }
13
+ }
14
+ },
15
+ "molecules": {
16
+ "newChatButton": {
17
+ "newChat": "New Chat"
18
+ },
19
+ "tasklist": {
20
+ "TaskList": {
21
+ "title": "\ud83d\uddd2\ufe0f Task List",
22
+ "loading": "Loading...",
23
+ "error": "An error occurred"
24
+ }
25
+ },
26
+ "attachments": {
27
+ "cancelUpload": "Cancel upload",
28
+ "removeAttachment": "Remove attachment"
29
+ },
30
+ "newChatDialog": {
31
+ "createNewChat": "Create new chat?",
32
+ "clearChat": "This will clear the current messages and start a new chat.",
33
+ "cancel": "Cancel",
34
+ "confirm": "Confirm"
35
+ },
36
+ "settingsModal": {
37
+ "settings": "Settings",
38
+ "expandMessages": "Expand Messages",
39
+ "hideChainOfThought": "Hide Chain of Thought",
40
+ "darkMode": "Dark Mode"
41
+ },
42
+ "detailsButton": {
43
+ "using": "Using",
44
+ "used": "Used"
45
+ },
46
+ "auth": {
47
+ "authLogin": {
48
+ "title": "Login to access the app.",
49
+ "form": {
50
+ "email": "Email address",
51
+ "password": "Password",
52
+ "noAccount": "Don't have an account?",
53
+ "alreadyHaveAccount": "Already have an account?",
54
+ "signup": "Sign Up",
55
+ "signin": "Sign In",
56
+ "or": "OR",
57
+ "continue": "Continue",
58
+ "forgotPassword": "Forgot password?",
59
+ "passwordMustContain": "Your password must contain:",
60
+ "emailRequired": "email is a required field",
61
+ "passwordRequired": "password is a required field"
62
+ },
63
+ "error": {
64
+ "default": "Unable to sign in.",
65
+ "signin": "Try signing in with a different account.",
66
+ "oauthsignin": "Try signing in with a different account.",
67
+ "redirect_uri_mismatch": "The redirect URI is not matching the oauth app configuration.",
68
+ "oauthcallbackerror": "Try signing in with a different account.",
69
+ "oauthcreateaccount": "Try signing in with a different account.",
70
+ "emailcreateaccount": "Try signing in with a different account.",
71
+ "callback": "Try signing in with a different account.",
72
+ "oauthaccountnotlinked": "To confirm your identity, sign in with the same account you used originally.",
73
+ "emailsignin": "The e-mail could not be sent.",
74
+ "emailverify": "Please verify your email, a new email has been sent.",
75
+ "credentialssignin": "Sign in failed. Check the details you provided are correct.",
76
+ "sessionrequired": "Please sign in to access this page."
77
+ }
78
+ },
79
+ "authVerifyEmail": {
80
+ "almostThere": "You're almost there! We've sent an email to ",
81
+ "verifyEmailLink": "Please click on the link in that email to complete your signup.",
82
+ "didNotReceive": "Can't find the email?",
83
+ "resendEmail": "Resend email",
84
+ "goBack": "Go Back",
85
+ "emailSent": "Email sent successfully.",
86
+ "verifyEmail": "Verify your email address"
87
+ },
88
+ "providerButton": {
89
+ "continue": "Continue with {{provider}}",
90
+ "signup": "Sign up with {{provider}}"
91
+ },
92
+ "authResetPassword": {
93
+ "newPasswordRequired": "New password is a required field",
94
+ "passwordsMustMatch": "Passwords must match",
95
+ "confirmPasswordRequired": "Confirm password is a required field",
96
+ "newPassword": "New password",
97
+ "confirmPassword": "Confirm password",
98
+ "resetPassword": "Reset Password"
99
+ },
100
+ "authForgotPassword": {
101
+ "email": "Email address",
102
+ "emailRequired": "email is a required field",
103
+ "emailSent": "Please check the email address {{email}} for instructions to reset your password.",
104
+ "enterEmail": "Enter your email address and we will send you instructions to reset your password.",
105
+ "resendEmail": "Resend email",
106
+ "continue": "Continue",
107
+ "goBack": "Go Back"
108
+ }
109
+ }
110
+ },
111
+ "organisms": {
112
+ "chat": {
113
+ "history": {
114
+ "index": {
115
+ "showHistory": "Show history",
116
+ "lastInputs": "Last Inputs",
117
+ "noInputs": "Such empty...",
118
+ "loading": "Loading..."
119
+ }
120
+ },
121
+ "inputBox": {
122
+ "input": {
123
+ "placeholder": "Type your message here..."
124
+ },
125
+ "speechButton": {
126
+ "start": "Start recording",
127
+ "stop": "Stop recording"
128
+ },
129
+ "SubmitButton": {
130
+ "sendMessage": "Send message",
131
+ "stopTask": "Stop Task"
132
+ },
133
+ "UploadButton": {
134
+ "attachFiles": "Attach files"
135
+ },
136
+ "waterMark": {
137
+ "text": "Built with"
138
+ }
139
+ },
140
+ "Messages": {
141
+ "index": {
142
+ "running": "Running",
143
+ "executedSuccessfully": "executed successfully",
144
+ "failed": "failed",
145
+ "feedbackUpdated": "Feedback updated",
146
+ "updating": "Updating"
147
+ }
148
+ },
149
+ "dropScreen": {
150
+ "dropYourFilesHere": "Drop your files here"
151
+ },
152
+ "index": {
153
+ "failedToUpload": "Failed to upload",
154
+ "cancelledUploadOf": "Cancelled upload of",
155
+ "couldNotReachServer": "Could not reach the server",
156
+ "continuingChat": "Continuing previous chat"
157
+ },
158
+ "settings": {
159
+ "settingsPanel": "Settings panel",
160
+ "reset": "Reset",
161
+ "cancel": "Cancel",
162
+ "confirm": "Confirm"
163
+ }
164
+ },
165
+ "threadHistory": {
166
+ "sidebar": {
167
+ "filters": {
168
+ "FeedbackSelect": {
169
+ "feedbackAll": "Feedback: All",
170
+ "feedbackPositive": "Feedback: Positive",
171
+ "feedbackNegative": "Feedback: Negative"
172
+ },
173
+ "SearchBar": {
174
+ "search": "Search"
175
+ }
176
+ },
177
+ "DeleteThreadButton": {
178
+ "confirmMessage": "This will delete the thread as well as it's messages and elements.",
179
+ "cancel": "Cancel",
180
+ "confirm": "Confirm",
181
+ "deletingChat": "Deleting chat",
182
+ "chatDeleted": "Chat deleted"
183
+ },
184
+ "index": {
185
+ "pastChats": "Past Chats"
186
+ },
187
+ "ThreadList": {
188
+ "empty": "Empty...",
189
+ "today": "Today",
190
+ "yesterday": "Yesterday",
191
+ "previous7days": "Previous 7 days",
192
+ "previous30days": "Previous 30 days"
193
+ },
194
+ "TriggerButton": {
195
+ "closeSidebar": "Close sidebar",
196
+ "openSidebar": "Open sidebar"
197
+ }
198
+ },
199
+ "Thread": {
200
+ "backToChat": "Go back to chat",
201
+ "chatCreatedOn": "This chat was created on"
202
+ }
203
+ },
204
+ "header": {
205
+ "chat": "Chat",
206
+ "readme": "Readme"
207
+ }
208
+ }
209
+ },
210
+ "hooks": {
211
+ "useLLMProviders": {
212
+ "failedToFetchProviders": "Failed to fetch providers:"
213
+ }
214
+ },
215
+ "pages": {
216
+ "Design": {},
217
+ "Env": {
218
+ "savedSuccessfully": "Saved successfully",
219
+ "requiredApiKeys": "Required API Keys",
220
+ "requiredApiKeysInfo": "To use this app, the following API keys are required. The keys are stored on your device's local storage."
221
+ },
222
+ "Page": {
223
+ "notPartOfProject": "You are not part of this project."
224
+ },
225
+ "ResumeButton": {
226
+ "resumeChat": "Resume Chat"
227
+ }
228
+ }
229
+ }
.gitignore CHANGED
@@ -1,2 +1,4 @@
1
  .venv
2
- .idea
 
 
 
1
  .venv
2
+ .idea
3
+ __pycache__
4
+ .env
Dockerfile CHANGED
@@ -1,9 +1,8 @@
1
  # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
2
  # you will also find guides on how best to write your Dockerfile
3
-
4
  FROM python:latest
5
 
6
- RUN apt-get update
7
 
8
  WORKDIR /code
9
 
@@ -30,4 +29,4 @@ RUN pip install --no-cache-dir --upgrade pip
30
  # Copy the current directory contents into the container at $HOME/app setting the owner to the user
31
  COPY --chown=user . $HOME/app
32
 
33
- CMD ["python", "main.py"]
 
1
  # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
2
  # you will also find guides on how best to write your Dockerfile
 
3
  FROM python:latest
4
 
5
+ RUN apt-get update
6
 
7
  WORKDIR /code
8
 
 
29
  # Copy the current directory contents into the container at $HOME/app setting the owner to the user
30
  COPY --chown=user . $HOME/app
31
 
32
+ CMD ["chainlit", "run", "app.py", "--port", "7860"]
__init__.py ADDED
File without changes
app.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.prompts import ChatPromptTemplate
2
+ from langchain.schema import StrOutputParser
3
+ from langchain.schema.runnable import Runnable
4
+ from langchain.schema.runnable.config import RunnableConfig
5
+ from react_agent_v2 import agent_executor
6
+ import chainlit as cl
7
+ from langchain_community.chat_message_histories import ChatMessageHistory
8
+ from langchain_core.chat_history import BaseChatMessageHistory
9
+ from langchain_core.runnables.history import RunnableWithMessageHistory
10
+
11
store = {}


def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """Return the chat history for *session_id*, creating one on first use."""
    history = store.get(session_id)
    if history is None:
        history = ChatMessageHistory()
        store[session_id] = history
    return history
17
+
18
+
19
+ agent_with_chat_history = RunnableWithMessageHistory(
20
+ agent_executor,
21
+ get_session_history,
22
+ input_messages_key="input",
23
+ history_messages_key="chat_history",
24
+ )
25
+
26
+ # agent_with_chat_history.invoke("Have any company recruit Machine Learning jobs?")
27
+
28
+
29
+ @cl.on_chat_start
30
+ async def on_chat_start():
31
+ # NOTE(review): `agent_with_chat_history` is built above but plain
+ # `agent_executor` is stored here, so the per-session history wrapper is
+ # never used by the UI — confirm this is intentional.
+ cl.user_session.set("runnable", agent_executor)
32
+
33
@cl.on_message
async def on_message(message: cl.Message):
    """Run the agent on an incoming user message and send back the answer.

    The agent executor is fetched from the Chainlit user session (stored by
    ``on_chat_start``). Backticks are stripped from the agent output so stray
    Markdown code fences do not break the chat rendering.
    """
    llm_chain = cl.user_session.get("runnable")

    response = await llm_chain.ainvoke(
        {"input": message.content},
        callbacks=[cl.AsyncLangchainCallbackHandler()],
    )

    await cl.Message(response["output"].replace("`", "")).send()
chainlit.md ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Welcome to Chainlit! 🚀🤖
2
+
3
+ Hi there, Developer! 👋 We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.
4
+
5
+ ## Useful Links 🔗
6
+
7
+ - **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) 📚
8
+ - **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! 💬
9
+
10
+ We can't wait to see what you create with Chainlit! Happy coding! 💻😊
11
+
12
+ ## Welcome screen
13
+
14
+ To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.
15
+
16
+ ## 🤗 JobQA with Knowledge Graph
main.py CHANGED
@@ -20,5 +20,4 @@ demo = gr.ChatInterface(
20
  multimodal=True,
21
  )
22
 
23
- if __name__ == "__main__":
24
- demo.launch(server_name = "0.0.0.0", server_port = 7860)
 
20
  multimodal=True,
21
  )
22
 
23
+ demo.launch()
 
prompts/cypher_examples.yaml ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ examples:
2
+ - question: Which companies located in 'San Francisco' are hiring for 'Data Scientist' roles with a 'Master's Degree' requirement?
3
+ cypher: |
4
+ MATCH (j:Job)<-[:RECRUITES]-(c:Company)-[:LOCATES_IN]->(l:Location)
5
+ MATCH (j)-[:REQUIRES]->(e:Education)
6
+ WHERE toLower(j.name) CONTAINS 'data scientist' AND toLower(l.name) CONTAINS 'san francisco' AND toLower(e.name) CONTAINS "master"
7
+ RETURN DISTINCT c.name AS company
8
+
9
+ - question: What are the most common skills required for 'Product Manager' jobs across different industries?
10
+ cypher: |
11
+ MATCH (j:Job)-[:REQUIRES]->(s:Skill)
12
+ WHERE toLower(j.name) CONTAINS "product manager"
13
+ RETURN s.name, count(*) AS skill_count
14
+ ORDER BY skill_count DESC
15
+ LIMIT 10
16
+
17
+ - question: Find all jobs that require at least 5 years of experience and a 'Bachelor's Degree' in 'Computer Science'
18
+ cypher: |
19
+ MATCH (we:Work_Exper)<-[:REQUIRES]-(j:Job)-[:REQUIRES]->(e:Education)
20
+ WHERE toLower(e.name) CONTAINS "bachelor" AND toLower(e.fields) CONTAINS "computer science" AND toLower(we.duration) CONTAINS "5 years"
21
+ RETURN j AS job
22
+
23
+ - question: Identify companies that are subsidiaries of 'Google' and are recruiting for 'Software Engineer' roles with 'Senior' level
24
+ cypher: |
25
+ MATCH (j:Job)<-[:RECRUITES]-(g:Company)<-[:SUBDIARY]-(c:Company)
26
+ MATCH (j)-[:AT_LEVEL]->(wl:Work_LV)
27
+ WHERE toLower(g.name) CONTAINS "google" AND toLower(j.name) CONTAINS "software engineer" AND toLower(wl.name) CONTAINS "senior"
28
+ RETURN DISTINCT c.name AS company
29
+
30
+ - question: Find companies recruiting "Machine Learning" jobs and their corresponding job titles.
31
+ cypher: |
32
+ MATCH (company: Company)-[:RECRUITES]->(job: Job)
33
+ WHERE job.name CONTAINS "Machine Learning"
34
+ RETURN company.name as company_name, job.name as job_title
35
+
36
+ - question: Machine Learning job requires?
37
+ cypher: |
38
+ MATCH (j:Job)
39
+ WHERE toLower(j.name) CONTAINS toLower("Machine Learning")
40
+ OPTIONAL MATCH (j)-[:REQUIRES]->(s:Skill)
41
+ OPTIONAL MATCH (j)-[:REQUIRES]->(e:Education)
42
+ OPTIONAL MATCH (j)-[:REQUIRES]->(we:Work_Exper)
43
+ RETURN s.name AS skill_requirements, e.name AS education_requirements, we.duration AS work_experience_requirements
prompts/cypher_instruct.yaml ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ prefix: |
2
+ You are an expert Neo4j Developer translating user questions into Cypher statement.
3
+ The queries should focus on using the CONTAINS keyword with toLower() to search for specific text patterns within node properties.
4
+ Must utilize MATCH to choose relevance node and relationships.
5
+ Utilize WHERE to filter data by using node properties.
6
+ Utilize RETURN to return results with aliases.
7
+
8
+ Instructions:
9
+ Use only the provided relationship types and properties in the schema.
10
+ Do not use any other relationship types or properties that are not provided.
11
+ Only respond to questions that require you to construct a Cypher statement.
12
+ Do not include any explanations or apologies in your responses.
13
+
14
+ Examples:
15
+
16
+ example_template: |
17
+ Question: {question}
18
+ Cypher: {cypher}
19
+
20
+ suffix: |
21
+ Schema: {schema}
22
+
23
+ Question: {{question}}
24
+ Cypher:
prompts/cypher_prompt.yaml CHANGED
@@ -51,8 +51,9 @@ template: |
51
  MATCH (company:Company)-[:RECRUITS]->(job:Job)-[r]->(node)
52
  WHERE job.name CONTAINS "Machine Learning" AND company.name CONTAINS "KMS"
53
  RETURN job, node
54
-
 
55
  The question is:
56
  {question}
57
 
58
- template_format: f-string
 
51
  MATCH (company:Company)-[:RECRUITS]->(job:Job)-[r]->(node)
52
  WHERE job.name CONTAINS "Machine Learning" AND company.name CONTAINS "KMS"
53
  RETURN job, node
54
+
55
+
56
  The question is:
57
  {question}
58
 
59
+ template_format: f-string
prompts/react_prompt.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Answer the following questions as best you can.
2
+ If the knowledge graph provides enough information, immediately answer the question.
3
+ You have access to the following tools:
4
+ {tools}
5
+
6
+ If the user does not provide enough information, use previous chat history to answer.
7
+ Previous chat history:
8
+ {chat_history}
9
+
10
+ Use the following format:
11
+
12
+ Question: the input question you must answer
13
+ Thought: you should always think about what to do
14
+ Action: the action to take, should be one of [{tool_names}]
15
+ Action Input: the input to the action
16
+ Observation: the result of the action
17
+ ... (this Thought/Action/Action Input/Observation can repeat N times)
18
+ Thought: I now know the final answer
19
+ Final Answer: the final answer to the original input question
20
+
21
+ Begin!
22
+
23
+ Question: {input}
24
+ Thought:{agent_scratchpad}
prompts/react_prompt_v2.txt ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand in Markdown format.
2
+ Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
3
+ Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
4
+
5
+ TOOLS:
6
+
7
+ ------
8
+
9
+ Assistant has access to the following tools:
10
+
11
+ {tools}
12
+
13
+ To use a tool, please use the following format:
14
+
15
+ ```
16
+ Thought: Do I need to use a tool? Yes
17
+ Action: the action to take, should be one of [{tool_names}]
18
+ Action Input: the input to the action
19
+ Observation: the result of the action
20
+ ```
21
+
22
+ If knowledge graph provide enough information, you MUST NOT use any tool.
23
+ When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
24
+
25
+ ```
26
+ Thought: Do I need to use a tool? No
27
+ Final Answer: [your response here]
28
+ ```
29
+
30
+ Begin!
31
+
32
+ Previous conversation history:
33
+ {chat_history}
34
+
35
+ New input: {input}
36
+
37
+ {agent_scratchpad}
prompts/schema.txt ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Node properties:
2
+ Company {{name: STRING}}
3
+ Job {{name: STRING, description: STRING, work_mode: STRING, benefit_compensation: STRING}}
4
+ Location {{name: STRING, location_type: STRING}}
5
+ Skill {{name: STRING}}
6
+ Education {{name: STRING, fields: STRING, status: STRING}}
7
+ Industry {{name: STRING}}
8
+ Work_LV {{name: STRING}}
9
+ Work_Exper {{name: STRING, duration: STRING}}
10
+ Relationship properties:
11
+
12
+ The relationships:
13
+ (:Company)-[:LOCATES_IN]->(:Location)
14
+ (:Company)-[:OPERATES_IN]->(:Industry)
15
+ (:Company)-[:RECRUITES]->(:Job)
16
+ (:Company)-[:SUBDIARY]->(:Company)
17
+ (:Job)-[:FROM]->(:Company)
18
+ (:Job)-[:WORK_AT]->(:Location)
19
+ (:Job)-[:REQUIRES]->(:Skill)
20
+ (:Job)-[:REQUIRES]->(:Education)
21
+ (:Job)-[:REQUIRES]->(:Work_Exper)
22
+ (:Job)-[:AT_LEVEL]->(:Work_LV)
23
+ (:Skill)-[:HYPERNYM]->(:Skill)
react_agent.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from tools import kg_search
2
+ from tools.kg_search import lookup_kg
3
+ from langchain.agents import AgentExecutor, create_react_agent
4
+ from langchain_core.prompts import PromptTemplate, ChatPromptTemplate, MessagesPlaceholder
5
+ from langchain.chains.conversation.memory import ConversationBufferMemory
6
+ from langchain.agents import Tool
7
+ from utils.utils import init_
8
+ from langchain_community.chat_message_histories import ChatMessageHistory
9
+ from langchain_core.runnables.history import RunnableWithMessageHistory
10
+
11
+ kg_query = Tool(
12
+ name = 'Query Knowledge Graph',
13
+ func = lookup_kg,
14
+ description='Useful for when you need to answer questions about job posts.'
15
+ )
16
+
17
+ tools = [kg_query]
18
+
19
+ with open("prompts/react_prompt.txt", "r") as file:
20
+ react_template = file.read()
21
+
22
+ react_prompt = PromptTemplate(
23
+ input_variables = ["tools", "tool_names", "input", "agent_scratchpad"],
24
+ template = react_template
25
+ )
26
+
27
+ prompt = ChatPromptTemplate.from_messages([
28
+ react_template,
29
+ MessagesPlaceholder(variable_name = "chat_history")
30
+ ])
31
+
32
+ _, llm = init_()
33
+
34
+ # Init ReAct agent
35
+ agent = create_react_agent(llm, tools, react_prompt)
36
+ agent_executor = AgentExecutor(
37
+ agent = agent,
38
+ tools = tools,
39
+ verbose = True
40
+ )
41
+
42
+ message_history = ChatMessageHistory()
43
+
44
+ agent_with_chat_history = RunnableWithMessageHistory(
45
+ agent_executor,
46
+ lambda session_id : message_history,
47
+ input_messages_key = "input",
48
+ history_messages_key = "chat_history"
49
+ )
50
+
51
+ if __name__ == "__main__":
52
+ # Test ReAct Agent
53
+ question = {
54
+ "input": "Have any company recruit Machine Learning jobs?"
55
+ }
56
+ result = agent_with_chat_history.invoke(
57
+ question,
58
+ config = {"configurable": {"session_id": "foo"}}
59
+ )
60
+ print(result)
61
+
62
+ print("Answered!!!!!!!!")
63
+
64
+ # Test memory
65
+ question = {
66
+ "input": "What did I just ask?"
67
+ }
68
+ result = agent_with_chat_history.invoke(
69
+ question,
70
+ config={"configurable": {"session_id": "foo"}}
71
+ )
72
+ print(result)
73
+
74
+ x = input("> ")
75
+
76
+
77
+
78
+
react_agent_v2.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.agents import Tool, AgentType, initialize_agent
2
+ from langchain.memory import ConversationBufferMemory
3
+ # from langchain.utilities import DuckDuckGoSearchAPIWrapper
4
+ from langchain_google_genai import ChatGoogleGenerativeAI
5
+ from langchain.agents import AgentExecutor
6
+ from langchain import hub
7
+ from langchain.agents.format_scratchpad import format_log_to_str
8
+ from langchain.agents.output_parsers import ReActSingleInputOutputParser
9
+ from langchain.tools.render import render_text_description
10
+ import os
11
+ from tools.kg_search import lookup_kg
12
+ from dotenv import load_dotenv
13
+ from langchain.agents import Tool
14
+ from langchain_core.prompts import PromptTemplate
15
+
16
+ load_dotenv()
17
+ os.environ["GOOGLE_API_KEY"] = os.getenv("GEMINI_API_KEY")
18
+ llm = ChatGoogleGenerativeAI(
19
+ model= "gemini-1.5-flash-latest",
20
+ temperature = 0
21
+ )
22
+
23
+ # search = DuckDuckGoSearchAPIWrapper()
24
+ #
25
+ # search_tool = Tool(name="Current Search",
26
+ # func=search.run,
27
+ # description="Useful when you need to answer questions about detail jobs information or search a job."
28
+ # )
29
+
30
+ kg_query = Tool(
31
+ name = 'Query Knowledge Graph',
32
+ func = lookup_kg,
33
+ description='Useful for when you need to answer questions about job posts.'
34
+ )
35
+
36
+
37
+ tools = [kg_query]
38
+ # memory = ConversationBufferMemory(memory_key="chat_history")
39
+ #
40
+ # agent_chain = initialize_agent(tools,
41
+ # llm,
42
+ # agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
43
+ # memory=memory,
44
+ # verbose=True)
45
+
46
+ # agent_prompt = hub.pull("hwchase17/react-chat")
47
+
48
+ with open("prompts/react_prompt_v2.txt", "r") as file:
49
+ react_template = file.read()
50
+
51
+ react_prompt = PromptTemplate(
52
+ input_variables = ["tools", "tool_names", "input", "agent_scratchpad", "chat_history"],
53
+ template = react_template
54
+ )
55
+
56
+ prompt = react_prompt.partial(
57
+ tools = render_text_description(tools),
58
+ tool_names = ", ".join([t.name for t in tools]),
59
+ )
60
+
61
+ llm_with_stop = llm.bind(stop=["\nObservation"])
62
+
63
+ agent = (
64
+ {
65
+ "input": lambda x: x["input"],
66
+ "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
67
+ "chat_history": lambda x: x["chat_history"],
68
+ }
69
+ | prompt
70
+ | llm_with_stop
71
+ | ReActSingleInputOutputParser()
72
+ )
73
+
74
+ memory = ConversationBufferMemory(memory_key="chat_history")
75
+
76
+ agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True, memory=memory)
77
+
78
+ # result = agent_executor.invoke({"input": "Have any company recruit Machine Learning jobs?"})
79
+ # print(result)
80
+
81
+ # result = agent_chain.run(input = "Have any company recruit Machine Learning jobs?")
82
+ # print(result)
83
+
84
+ # question = {
85
+ # "input": "What did I just ask?"
86
+ # }
87
+ #
88
+ # result = agent_executor.invoke(question)
89
+ # print(result)
90
+
91
if __name__ == "__main__":
    # Simple interactive REPL for manually testing the agent.
    # Exit with Ctrl-C / Ctrl-D; agent errors are reported and the loop
    # continues, instead of the previous bare `except:` which silently
    # terminated on any exception (hiding real failures).
    while True:
        try:
            question = input("> ")
        except (KeyboardInterrupt, EOFError):
            break
        try:
            result = agent_executor.invoke({"input": question})
            print(result)
        except Exception as exc:  # surface agent errors instead of exiting silently
            print(f"Error: {exc}")
requirements.txt CHANGED
@@ -2,4 +2,7 @@ gradio
2
  langchain
3
  langchain-community
4
  langchain-google-genai
5
- neo4j
 
 
 
 
2
  langchain
3
  langchain-community
4
  langchain-google-genai
5
+ langchain-core
6
+ faiss-cpu
7
+ neo4j
8
+ langchainhub
tools/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from utils import utils
tools/duckduckgo_search.py ADDED
File without changes
tools/job_recommendation.py ADDED
File without changes
tools/kg_search.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import yaml
3
+ from dotenv import load_dotenv
4
+ from langchain_core.example_selectors import SemanticSimilarityExampleSelector
5
+ from langchain_core.prompts import FewShotPromptTemplate, PromptTemplate
6
+ from langchain_google_genai import ChatGoogleGenerativeAI
7
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings
8
+ from langchain_community.vectorstores import FAISS
9
+ from langchain.schema import AIMessage, HumanMessage, SystemMessage
10
+ from langchain.schema.output_parser import StrOutputParser
11
+ from langchain.tools import BaseTool, StructuredTool, tool
12
+ from langchain_community.graphs import Neo4jGraph
13
+ # from utils import utils
14
+
15
+
16
# Question-Cypher pair examples
# Load curated (question, cypher) pairs used as few-shot examples for
# Cypher generation.
with open("prompts/cypher_examples.yaml", "r") as f:
    example_pairs = yaml.safe_load(f)

examples = example_pairs["examples"]

# LLM for choose the best similar examples
# Re-export the Gemini key under the name the langchain-google-genai
# client reads (GOOGLE_API_KEY).
load_dotenv()
os.environ["GOOGLE_API_KEY"] = os.getenv("GEMINI_API_KEY")

# Embedding model used to rank example similarity against the incoming question.
embedding_model = GoogleGenerativeAIEmbeddings(
    model= "models/text-embedding-004"
)

# Select the single most similar example (k=1) via a FAISS vector store
# built over the example questions.
example_selector = SemanticSimilarityExampleSelector.from_examples(
    examples = examples,
    embeddings = embedding_model,
    vectorstore_cls = FAISS,
    k = 1
)

# Load schema, prefix, suffix
with open("prompts/schema.txt", "r") as file:
    schema = file.read()

with open("prompts/cypher_instruct.yaml", "r") as file:
    instruct = yaml.safe_load(file)

# Template used to render each selected (question, cypher) example.
example_prompt = PromptTemplate(
    input_variables = ["question", "cypher"],
    template = instruct["example_template"]
)

# Few-shot prompt: instruction prefix + dynamically selected example(s)
# + suffix with the graph schema baked in. Only "question" remains free.
dynamic_prompt = FewShotPromptTemplate(
    example_selector = example_selector,
    example_prompt = example_prompt,
    prefix = instruct["prefix"],
    suffix = instruct["suffix"].format(schema=schema),
    input_variables = ["question"]
)
56
+
57
def generate_cypher(question: str) -> str:
    """Generate a Cypher query for *question* using the few-shot prompt.

    Args:
        question: Raw natural-language question from the user.

    Returns:
        The generated Cypher statement, with markdown fences stripped.
    """
    load_dotenv()

    # Set up Neo4J & Gemini API credentials for downstream clients.
    # NOTE(review): os.environ assignment raises TypeError when a variable
    # is absent from the environment/.env -- confirm all four are always set.
    os.environ["NEO4J_URI"] = os.getenv("NEO4J_URI")
    os.environ["NEO4J_USERNAME"] = os.getenv("NEO4J_USERNAME")
    os.environ["NEO4J_PASSWORD"] = os.getenv("NEO4J_PASSWORD")
    os.environ["GOOGLE_API_KEY"] = os.getenv("GEMINI_API_KEY")

    gemini_chat = ChatGoogleGenerativeAI(
        model= "gemini-1.5-flash-latest"
    )

    # Few-shot prompt -> Gemini -> plain string.
    # (Removed unused locals `chat_messages` and `output_parser` from the
    # original -- neither was referenced by the chain.)
    chain = dynamic_prompt | gemini_chat | StrOutputParser()
    cypher_statement = chain.invoke(question)
    # Strip markdown code fences ("```cypher ... ```") from the LLM output.
    # NOTE(review): this also deletes the literal word "cypher" anywhere in
    # the query body -- acceptable only if node/property names never contain it.
    cypher_statement = cypher_statement.replace("```", "").replace("cypher", "").strip()

    return cypher_statement
81
+
82
def run_cypher(question, cypher_statement: str) -> str:
    """Execute *cypher_statement* on the Neo4j graph and summarise the result.

    Args:
        question: The user's original question, included in the summary prompt.
        cypher_statement: Cypher query to run against the knowledge graph.

    Returns:
        A natural-language summary of the query response produced by Gemini.
    """
    graph = Neo4jGraph()
    query_result = graph.query(cypher_statement)

    chat_model = ChatGoogleGenerativeAI(
        model= "gemini-1.5-flash-latest"
    )

    # System prompt asking the model to summarise the raw graph response.
    summary_prompt = f"""
    Generate a concise and informative summary of the results in a polite and easy-to-understand manner based on question and Cypher query response.
    Question: {question}
    Response: {str(query_result)}

    Avoid repeat information.
    If response is empty, you should answer "Knowledge graph doesn't have enough information".
    Answer:
    """

    messages = [
        SystemMessage(content= summary_prompt),
        HumanMessage(content="Provide information about question from knowledge graph")
    ]

    return chat_model.invoke(messages).content
109
+
110
def lookup_kg(question: str) -> str:
    """Based on question, make and run Cypher statements.

    Args:
        question: Raw question from user input.

    Returns:
        A natural-language answer, or a fallback message when the query fails.
    """
    cypher_statement = generate_cypher(question)
    # generate_cypher already strips fences/"cypher"; this second pass is a
    # harmless no-op kept for safety.
    cypher_statement = cypher_statement.replace("cypher", "").replace("```", "").strip()

    try:
        answer = run_cypher(question, cypher_statement)
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Any query/LLM failure falls back to
        # a polite "no information" answer.
        answer = "Knowledge graph doesn't have enough information"

    return answer
124
+
125
+
126
if __name__ == "__main__":
    question = "Have any company is recruiting Machine Learning jobs?"

    # Test lookup_kg tool.
    # BUG FIX: `lookup_kg` is defined above as a plain function (no @tool
    # decorator), so it has no `.invoke()` method -- the original
    # `lookup_kg.invoke(question)` raised AttributeError. Call it directly.
    kg_info = lookup_kg(question)
    print(kg_info)
utils.py CHANGED
@@ -8,7 +8,7 @@ from langchain.chains import GraphCypherQAChain
8
  from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
9
 
10
  def config():
11
- # load_dotenv()
12
 
13
  # Set up Neo4J & Gemini API
14
  os.environ["NEO4J_URI"] = os.getenv("NEO4J_URI")
@@ -24,12 +24,17 @@ def load_prompt(filepath):
24
 
25
  def init_():
26
  config()
27
- knowledge_graph = Neo4jGraph()
28
- llm_chat = ChatGoogleGenerativeAI(
29
- model= "gemini-1.5-flash-latest"
 
30
  )
31
 
 
 
 
32
  # Connect to Neo4J Knowledge Graph
 
33
  cypher_prompt = load_prompt("prompts/cypher_prompt.yaml")
34
  qa_prompt = load_prompt("prompts/qa_prompt.yaml")
35
 
@@ -41,15 +46,8 @@ def init_():
41
  cypher_prompt= CYPHER_GENERATION_PROMPT,
42
  qa_prompt= QA_GENERATION_PROMPT
43
  )
44
-
45
- return chain
46
 
47
- # Init GraphQA Chain
48
- chain = init_()
49
-
50
- def get_llm_response(query):
51
  return chain.invoke({"query": query})["result"]
52
-
53
 
54
  def llm_answer(message, history):
55
  # history_langchain_format = []
 
8
  from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
9
 
10
  def config():
11
+ load_dotenv()
12
 
13
  # Set up Neo4J & Gemini API
14
  os.environ["NEO4J_URI"] = os.getenv("NEO4J_URI")
 
24
 
25
  def init_():
26
  config()
27
+ graph = Neo4jGraph(enhanced_schema= True)
28
+ llm = ChatGoogleGenerativeAI(
29
+ model= "gemini-1.5-flash-latest",
30
+ temperature = 0
31
  )
32
 
33
+ return graph, llm
34
+
35
+ def get_llm_response(query):
36
  # Connect to Neo4J Knowledge Graph
37
+ knowledge_graph, llm_chat = init_()
38
  cypher_prompt = load_prompt("prompts/cypher_prompt.yaml")
39
  qa_prompt = load_prompt("prompts/qa_prompt.yaml")
40
 
 
46
  cypher_prompt= CYPHER_GENERATION_PROMPT,
47
  qa_prompt= QA_GENERATION_PROMPT
48
  )
 
 
49
 
 
 
 
 
50
  return chain.invoke({"query": query})["result"]
 
51
 
52
  def llm_answer(message, history):
53
  # history_langchain_format = []
utils/__init__.py ADDED
File without changes
utils/utils.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import yaml
3
+ from dotenv import load_dotenv
4
+ from langchain_google_genai import ChatGoogleGenerativeAI
5
+ from langchain_community.graphs import Neo4jGraph
6
+ from langchain_core.prompts.prompt import PromptTemplate
7
+ from langchain.chains import GraphCypherQAChain
8
+ from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
9
+
10
+
11
+
12
def config():
    """Load .env and export the Neo4j / Gemini credentials into the process env.

    Maps GEMINI_API_KEY from the .env onto GOOGLE_API_KEY, the name read by
    the langchain-google-genai client.
    """
    load_dotenv()

    # Set up Neo4J & Gemini API: target env var -> source env var.
    credential_map = {
        "NEO4J_URI": "NEO4J_URI",
        "NEO4J_USERNAME": "NEO4J_USERNAME",
        "NEO4J_PASSWORD": "NEO4J_PASSWORD",
        "GOOGLE_API_KEY": "GEMINI_API_KEY",
    }
    for target, source in credential_map.items():
        os.environ[target] = os.getenv(source)
20
+
21
def load_prompt(filepath):
    """Read a YAML prompt file and return its parsed contents.

    Args:
        filepath: Path to a YAML file describing a PromptTemplate.

    Returns:
        The deserialized YAML (a dict of PromptTemplate kwargs).
    """
    with open(filepath, "r") as prompt_file:
        return yaml.safe_load(prompt_file)
26
+
27
def init_():
    """Configure credentials, then build the graph handle and chat model.

    Returns:
        Tuple of (Neo4jGraph connection, Gemini chat model).
    """
    config()
    knowledge_graph = Neo4jGraph()
    chat_model = ChatGoogleGenerativeAI(model= "gemini-1.5-flash-latest")
    return knowledge_graph, chat_model
35
+
36
def get_llm_response(query):
    """Answer *query* by generating and executing Cypher over the knowledge graph.

    Args:
        query: Natural-language question from the user.

    Returns:
        The chain's final answer string.
    """
    # Connect to Neo4J Knowledge Graph
    graph, chat_model = init_()

    cypher_spec = load_prompt("prompts/cypher_prompt.yaml")
    qa_spec = load_prompt("prompts/qa_prompt.yaml")

    # Question -> Cypher -> graph query -> answer, with verbose tracing on.
    chain = GraphCypherQAChain.from_llm(
        chat_model,
        graph=graph,
        verbose=True,
        cypher_prompt= PromptTemplate(**cypher_spec),
        qa_prompt= PromptTemplate(**qa_spec)
    )

    return chain.invoke({"query": query})["result"]
52
+
53
def llm_answer(message, history):
    """Chat callback: answer the user's message via the knowledge-graph chain.

    Args:
        message: Dict with a "text" key holding the user's question
            (Gradio multimodal message format).
        history: Prior (human, ai) turns; currently unused.

    Returns:
        The chain's answer string, or "Exception" when anything fails.
    """
    try:
        response = get_llm_response(message["text"])
    except Exception:
        # BUG FIX: the original had a second `except Error:` clause here.
        # `Error` is not defined anywhere (it would raise NameError if
        # evaluated), and the clause was unreachable anyway because
        # `except Exception` already matches every ordinary error.
        response = "Exception"
    return response
69
+
70
+ # if __name__ == "__main__":
71
+ # message = "Have any company recruiting jobs about Machine Learning and coresponding job titles?"
72
+ # history = [("What's your name?", "My name is Gemini")]
73
+ # resp = llm_answer(message, history)
74
+ # print(resp)
75
+