Spaces: Runtime error

Commit 2cba383 · Johnny Lee committed
1 Parent(s): b832bd1

update

Browse files:
- .gitignore +160 -0
- .txt +0 -1
- app.py +57 -23
- make_template.py +0 -12
- partnering with a battery supplier (joint venture, for example).txt +0 -1
- requirements.txt +5 -5
- rivian.txt +0 -1
.gitignore
CHANGED
@@ -1,2 +1,162 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
 .env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
 chats/*
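The new file is the stock GitHub Python .gitignore template, with the two pre-existing entries (`.env` and `chats/*`) carried through as context lines. A quick, hedged way to confirm the merge still ignores what it should (assumes a local git checkout; the sample paths are illustrative):

```python
# Hypothetical spot-check: ask git which pattern (if any) ignores each path.
import subprocess

for path in [".env", "chats/session.json", "__pycache__/app.cpython-311.pyc"]:
    # `git check-ignore -v` prints "source:line:pattern<TAB>path" for ignored
    # paths and exits non-zero for paths that are not ignored.
    result = subprocess.run(
        ["git", "check-ignore", "-v", path], capture_output=True, text=True
    )
    print(path, "->", result.stdout.strip() or "not ignored")
```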
.txt
DELETED
@@ -1 +0,0 @@
-Buying from multiple battery suppliers
app.py
CHANGED
@@ -3,6 +3,7 @@ import asyncio
 import datetime
 import logging
 import os
+import requests
 import json
 import uuid

@@ -18,7 +19,9 @@ import tiktoken
 # load_dotenv()

 from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
+from langchain.callbacks.tracers.run_collector import RunCollectorCallbackHandler
 from langchain.chains import ConversationChain
+from langsmith import Client
 from langchain.chat_models import ChatAnthropic, ChatOpenAI
 from langchain.memory import ConversationTokenBufferMemory
 from langchain.prompts.chat import (
@@ -68,6 +71,19 @@ def reset_textbox():


 def auth(username, password):
+    auth_endpoint = "https://worker_auth.jclcw.workers.dev/auth"
+    try:
+        auth_payload = {username: password}
+        print(auth_payload)
+        auth_response = requests.post(
+            auth_endpoint,
+            json=auth_payload,
+            timeout=3,
+        )
+        auth_response.raise_for_status()
+        return auth_response.status_code == 200
+    except Exception as exc:
+        LOG.error(exc)
     return (username, password) in creds


@@ -137,13 +153,17 @@ def update_system_prompt(
         return_messages=True,
     )
     state["chain"] = ConversationChain(
-        memory=state["memory"],
+        memory=state["memory"],
+        prompt=state["template"],
+        llm=llm,
     )
     updated_status = "Prompt Updated! Chat has reset."
     return updated_status, state


-def set_state(state: Optional[gr.State] = None) -> Dict[str, Any]:
+def set_state(
+    state: Optional[gr.State] = None, metadata: Optional[Dict[str, str]] = None
+) -> Dict[str, Any]:
     if state is None:
         template = make_template()
         llm_state = make_llm_state()
@@ -151,7 +171,9 @@ def set_state(state: Optional[gr.State] = None) -> Dict[str, Any]:
     memory = ConversationTokenBufferMemory(
         llm=llm, max_token_limit=llm_state["context_length"], return_messages=True
     )
-    chain = ConversationChain(
+    chain = ConversationChain(
+        memory=memory, prompt=template, llm=llm, metadata=metadata
+    )
     session_id = str(uuid.uuid4())
     state = dict(
         template=template,
@@ -170,7 +192,7 @@ async def respond(
     inp: str,
     state: Optional[Dict[str, Any]],
     request: gr.Request,
-):
+) -> Tuple[List[str], gr.State, Optional[str]]:
     """Execute the chat functionality."""

     def prep_messages(
@@ -211,7 +233,7 @@ async def respond(

     try:
         if state is None:
-            state = set_state()
+            state = set_state(metadata=dict(username=request.username))
         llm = state["llm_state"]["llm"]
         context_length = state["llm_state"]["context_length"]
         tokenizer = state["llm_state"]["tokenizer"]
@@ -229,16 +251,31 @@ async def respond(
         LOG.info(f"Tokens to send: {total_token_count}")
         # Run chain and append input.
         callback = AsyncIteratorCallbackHandler()
+        run_collector = RunCollectorCallbackHandler()
         run = asyncio.create_task(
-            state["chain"].apredict(
+            state["chain"].apredict(
+                input=inp,
+                callbacks=[callback, run_collector],
+            )
         )
         state["history"].append((inp, ""))
+        run_id = None
         async for tok in callback.aiter():
             user, bot = state["history"][-1]
             bot += tok
             state["history"][-1] = (user, bot)
-            yield state["history"], state
+            yield state["history"], state, None
         await run
+        if run_collector.traced_runs and run_id is None:
+            run_id = run_collector.traced_runs[0].id
+            LOG.info(f"RUNID: {run_id}")
+        if run_id:
+            run_collector.traced_runs = []
+            url = Client().share_run(run_id)
+            LOG.info(f"""URL : {url}""")
+            url_markdown = f"""[Shareable chat history link]({url})
+            [{url}]({url})"""
+            yield state["history"], state, url_markdown
         LOG.info(f"""[{request.username}] ENDING CHAIN""")
         LOG.debug(f"History: {state['history']}")
         LOG.debug(f"Memory: {state['memory'].json()}")
@@ -251,7 +288,7 @@ async def respond(
             },
         )
         LOG.debug(f"Data to flag: {data_to_flag}")
-        gradio_flagger.flag(flag_data=data_to_flag, username=request.username)
+        # gradio_flagger.flag(flag_data=data_to_flag, username=request.username)
     except Exception as e:
         LOG.exception(e)
         raise e
@@ -265,7 +302,7 @@ theme = gr.themes.Soft()

 creds = [(os.getenv("CHAT_USERNAME"), os.getenv("CHAT_PASSWORD"))]

-gradio_flagger = gr.HuggingFaceDatasetSaver(HF_TOKEN, "chats")
+# gradio_flagger = gr.HuggingFaceDatasetSaver(HF_TOKEN, "chats")
 title = "AI Debate Partner"

 with gr.Blocks(
@@ -297,34 +334,31 @@ with gr.Blocks(
     with gr.Tab("Chatbot"):
         with gr.Column():
             chatbot = gr.Chatbot(label="ChatBot")
-            inputs = gr.Textbox(
+            input_message = gr.Textbox(
                 placeholder="Send a message.",
                 label="Type an input and press Enter",
             )
             b1 = gr.Button(value="Submit")
+            share_link = gr.Markdown()

-    gradio_flagger.setup([chatbot], "chats")
+    # gradio_flagger.setup([chatbot], "chats")

-    inputs.submit(
-        respond,
-        [inputs, state],
-        [chatbot, state],
-    )
-    b1.click(
-        respond,
-        [inputs, state],
-        [chatbot, state],
-    )
+    chat_bot_submit_params = dict(
+        fn=respond, inputs=[input_message, state], outputs=[chatbot, state, share_link]
+    )
+
+    input_message.submit(**chat_bot_submit_params)
+    b1.click(**chat_bot_submit_params)
     update_system_button.click(
         update_system_prompt,
         [system_prompt_input, llm_input, case_input],
         [status_markdown, state],
     )

-    update_system_button.click(reset_textbox, [], [inputs])
+    update_system_button.click(reset_textbox, [], [input_message])
     update_system_button.click(reset_textbox, [], [chatbot])
-    b1.click(reset_textbox, [], [inputs])
-
+    b1.click(reset_textbox, [], [input_message])
+    input_message.submit(reset_textbox, [], [input_message])

 demo.queue(max_size=99, concurrency_count=99, api_open=False).launch(
     debug=True, auth=auth
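The headline change in app.py is that `auth` now tries a remote check against a Cloudflare Worker before falling back to the local `creds` list. A minimal standalone sketch of that flow (the endpoint URL, payload shape, and timeout come from the diff; treating any non-2xx or network error as "fall back" mirrors the `except` branch):

```python
import logging

import requests

LOG = logging.getLogger(__name__)

AUTH_ENDPOINT = "https://worker_auth.jclcw.workers.dev/auth"  # from the diff


def remote_auth(username: str, password: str, local_creds: list) -> bool:
    """Mirror of the diff's auth(): remote check first, local fallback on any error."""
    try:
        # Note the payload shape from the diff: the username is the JSON *key*.
        resp = requests.post(AUTH_ENDPOINT, json={username: password}, timeout=3)
        resp.raise_for_status()  # any 4xx/5xx jumps to the fallback below
        return resp.status_code == 200
    except Exception as exc:
        LOG.error(exc)
        return (username, password) in local_creds
```

One behavioral detail worth knowing: because `raise_for_status()` routes a 401 from the worker into the `except` branch, a remote rejection still falls through to the local credential check rather than denying access outright.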
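The other substantive change is the LangSmith wiring in `respond`: a `RunCollectorCallbackHandler` rides along with the streaming callback, and once the chain finishes, the collected run id is turned into a shareable trace URL rendered in the new `share_link` Markdown component. A condensed sketch of that pattern (assumes LangSmith tracing is already configured via the usual `LANGCHAIN_API_KEY` and tracing environment variables):

```python
from langchain.callbacks.tracers.run_collector import RunCollectorCallbackHandler
from langsmith import Client

run_collector = RunCollectorCallbackHandler()

# ... await chain.apredict(input=inp, callbacks=[streaming_callback, run_collector]) ...

if run_collector.traced_runs:
    run_id = run_collector.traced_runs[0].id  # first traced run of this turn
    run_collector.traced_runs = []  # clear so the next turn collects a fresh run
    url = Client().share_run(run_id)  # publishes the trace, returns a public URL
    print(f"[Shareable chat history link]({url})")
```

This also explains why `respond` now yields a three-tuple and why the old HuggingFaceDatasetSaver flagging is commented out: the shareable LangSmith trace takes over as the chat-history record.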
make_template.py
DELETED
@@ -1,12 +0,0 @@
-import json
-import fire
-
-
-def main():
-    with open("templates.json", "r") as f:
-        templates = json.load(f)
-    return templates[0]
-
-
-if __name__ == "__main__":
-    fire.Fire(main())
partnering with a battery supplier (joint venture, for example).txt
DELETED
@@ -1 +0,0 @@
-Developing in-house manufacturing capabilities
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
-
-
-
-
-
+anthropic==0.3.7
+gradio==3.39.0
+langchain==0.0.265
+openai==0.27.8
+tiktoken==0.4.0
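Note that the updated app.py also imports `requests` and `langsmith` (for `Client().share_run`), neither of which is pinned here; that gap could plausibly relate to the "Runtime error" status shown above. A working build presumably needs entries along these lines (package names from the diff's imports; versions left open since none appear in the commit):

requests
langsmith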
rivian.txt
DELETED
@@ -1 +0,0 @@
-Rivian secure supply for their EV batteries primarily through which of the following options?