mpav committed on
Commit
3359285
1 Parent(s): 4d68911

Initial push

Files changed (8)
  1. .chainlit/config.toml +84 -0
  2. .gitattributes copy +35 -0
  3. .gitignore +160 -0
  4. Dockerfile +11 -0
  5. README copy.md +190 -0
  6. app.py +80 -0
  7. chainlit.md +3 -0
  8. requirements.txt +5 -0
.chainlit/config.toml ADDED
@@ -0,0 +1,84 @@
+ [project]
+ # Whether to enable telemetry (default: true). No personal data is collected.
+ enable_telemetry = true
+
+ # List of environment variables to be provided by each user to use the app.
+ user_env = []
+
+ # Duration (in seconds) during which the session is saved when the connection is lost
+ session_timeout = 3600
+
+ # Enable third parties caching (e.g LangChain cache)
+ cache = false
+
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
+ # follow_symlink = false
+
+ [features]
+ # Show the prompt playground
+ prompt_playground = true
+
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
+ unsafe_allow_html = false
+
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
+ latex = false
+
+ # Authorize users to upload files with messages
+ multi_modal = true
+
+ # Allows user to use speech to text
+ [features.speech_to_text]
+ enabled = false
+ # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
+ # language = "en-US"
+
+ [UI]
+ # Name of the app and chatbot.
+ name = "Chatbot"
+
+ # Show the readme while the conversation is empty.
+ show_readme_as_default = true
+
+ # Description of the app and chatbot. This is used for HTML tags.
+ # description = ""
+
+ # Large size content are by default collapsed for a cleaner ui
+ default_collapse_content = true
+
+ # The default value for the expand messages settings.
+ default_expand_messages = false
+
+ # Hide the chain of thought details from the user in the UI.
+ hide_cot = false
+
+ # Link to your github repo. This will add a github button in the UI's header.
+ # github = ""
+
+ # Specify a CSS file that can be used to customize the user interface.
+ # The CSS file can be served from the public directory or via an external link.
+ # custom_css = "/public/test.css"
+
+ # Override default MUI light theme. (Check theme.ts)
+ [UI.theme.light]
+ #background = "#FAFAFA"
+ #paper = "#FFFFFF"
+
+ [UI.theme.light.primary]
+ #main = "#F80061"
+ #dark = "#980039"
+ #light = "#FFE7EB"
+
+ # Override default MUI dark theme. (Check theme.ts)
+ [UI.theme.dark]
+ #background = "#FAFAFA"
+ #paper = "#FFFFFF"
+
+ [UI.theme.dark.primary]
+ #main = "#F80061"
+ #dark = "#980039"
+ #light = "#FFE7EB"
+
+
+ [meta]
+ generated_by = "0.7.700"
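
The file above is the stock configuration generated by Chainlit 0.7.700 (see `[meta] generated_by`); the keys most relevant to this app are `[UI] name`, `hide_cot`, `[features] multi_modal`, and `[project] enable_telemetry`. A minimal sketch for inspecting them programmatically (it assumes Python 3.11+ for the standard-library `tomllib`, which the 3.9 Docker image below does not have, and the script itself is not part of the commit):

```python
# Sketch: read the generated Chainlit config and print the settings that
# matter most for this app. Assumes Python 3.11+ for the stdlib tomllib.
import tomllib
from pathlib import Path

config = tomllib.loads(Path(".chainlit/config.toml").read_text())

print("app name:   ", config["UI"]["name"])
print("hide_cot:   ", config["UI"]["hide_cot"])
print("multi_modal:", config["features"]["multi_modal"])
print("telemetry:  ", config["project"]["enable_telemetry"])
```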
.gitattributes copy ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,160 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
Dockerfile ADDED
@@ -0,0 +1,11 @@
+ FROM python:3.9
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+ WORKDIR $HOME/app
+ COPY --chown=user . $HOME/app
+ COPY ./requirements.txt ~/app/requirements.txt
+ RUN pip install -r requirements.txt
+ COPY . .
+ CMD ["chainlit", "run", "app.py", "--port", "7860"]
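
The image serves Chainlit on port 7860, matching `app_port: 7860` in the README front matter below. A minimal smoke test for a locally running container, assuming it was built and started as the README describes (`docker build -t llm-app .`, then `docker run -p 7860:7860 llm-app`); the script itself is not part of the commit:

```python
# Poll the published port until the Chainlit server answers, then stop.
# Assumes the container is running with -p 7860:7860 (see the README below).
import time
import urllib.error
import urllib.request

URL = "http://localhost:7860"

for attempt in range(30):
    try:
        with urllib.request.urlopen(URL, timeout=2) as resp:
            print(f"App is up (HTTP {resp.status}) after {attempt + 1} attempt(s)")
            break
    except (urllib.error.URLError, OSError):
        time.sleep(1)
else:
    raise SystemExit("No response on port 7860 after 30 attempts")
```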
README copy.md ADDED
@@ -0,0 +1,190 @@
+ ---
+ title: BeyondChatGPT Demo
+ emoji: 📉
+ colorFrom: pink
+ colorTo: yellow
+ sdk: docker
+ pinned: false
+ app_port: 7860
+ ---
+
+ <p align = "center" draggable="false" ><img src="https://github.com/AI-Maker-Space/LLM-Dev-101/assets/37101144/d1343317-fa2f-41e1-8af1-1dbb18399719"
+ width="200px"
+ height="auto"/>
+ </p>
+
+
+ <h1 align="center" id="heading">:wave: Welcome to Beyond ChatGPT!!</h1>
+
+ For a step-by-step YouTube video walkthrough, watch this: [Deploying a Chainlit app on Hugging Face](https://www.youtube.com/live/pRbbZcL0NMI?si=NAYhMZ_suAY84f06&t=2119)
+
+ ![Beyond ChatGPT: Build Your First LLM Application](https://github.com/AI-Maker-Space/Beyond-ChatGPT/assets/48775140/cb7a74b8-28af-4d12-a008-8f5a51d47b4c)
+
+ ## 🤖 Your First LLM App
+
+ > If you need an introduction to `git` or information on how to set up API keys for the tools we'll be using, check out our [Interactive Dev Environment for LLM Development](https://github.com/AI-Maker-Space/Interactive-Dev-Environment-for-LLM-Development/tree/main), which has everything you need to get started with this repository!
+
+ In this repository, we'll walk you through the steps to create a Large Language Model (LLM) application using Chainlit, then containerize it using Docker, and finally deploy it on Hugging Face Spaces.
+
+ Are you ready? Let's get started!
+
+ <details>
+ <summary>🖥️ Accessing "gpt-3.5-turbo" (ChatGPT) like a developer</summary>
+
+ 1. Head to [this notebook](https://colab.research.google.com/drive/1mOzbgf4a2SP5qQj33ZxTz2a01-5eXqk2?usp=sharing) and follow along with the instructions!
+
+ 2. Complete the notebook and try out your own system/assistant messages!
+
+ That's it! Head to the next step and start building your application!
+
+ </details>
+
+
+ <details>
+ <summary>🏗️ Building Your First LLM App</summary>
+
+ 1. Clone [this](https://github.com/AI-Maker-Space/Beyond-ChatGPT/tree/main) repo.
+
+ ``` bash
+ git clone https://github.com/AI-Maker-Space/Beyond-ChatGPT.git
+ ```
+
+ 2. Navigate into the repo.
+ ``` bash
+ cd Beyond-ChatGPT
+ ```
+
+ 3. Install the packages required for this Python environment from `requirements.txt`.
+ ``` bash
+ pip install -r requirements.txt
+ ```
+
+ 4. Open your `.env` file, replace the `###` with your OpenAI API key, and save the file.
+ ``` bash
+ OPENAI_API_KEY=sk-###
+ ```
+
+ 5. Let's try running it locally. Make sure you're in the Python environment where you installed Chainlit and OpenAI, then run the app with Chainlit. This may take a minute.
+ ``` bash
+ chainlit run app.py -w
+ ```
+
+ <p align = "center" draggable="false">
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/54bcccf9-12e2-4cef-ab53-585c1e2b0fb5">
+ </p>
+
+ Great work! Let's see if we can interact with our chatbot.
+
+ <p align = "center" draggable="false">
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/854e4435-1dee-438a-9146-7174b39f7c61">
+ </p>
+
+ Awesome! Time to throw it into a Docker container and prepare it for shipping!
+ </details>
+
+
+
+ <details>
+ <summary>🐳 Containerizing our App</summary>
+
+ 1. Let's build the Docker image. We'll tag the image as `llm-app` using the `-t` parameter. The `.` at the end sets the build context to the current directory, so its files are available to the build.
+
+ ``` bash
+ docker build -t llm-app .
+ ```
+
+ 2. Run and test the Docker image locally using the `run` command. The `-p` parameter maps the **host port** (left of the `:`) to the **container port** (right of the `:`).
+
+ ``` bash
+ docker run -p 7860:7860 llm-app
+ ```
+
+ 3. Visit http://localhost:7860 in your browser to see if the app runs correctly.
+
+ <p align = "center" draggable="false">
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/2c764f25-09a0-431b-8d28-32246e0ca1b7">
+ </p>
+
+ Great! Time to ship!
+ </details>
+
+
+ <details>
+ <summary>🚀 Deploying Your First LLM App</summary>
+
+ 1. Let's create a new Hugging Face Space. Navigate to [Hugging Face](https://huggingface.co), click on your profile picture in the top right, then click `New Space`.
+
+ <p align = "center" draggable="false">
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/f0656408-28b8-4876-9887-8f0c4b882bae">
+ </p>
+
+ 2. Set up your Space as shown below:
+
+ - Owner: Your username
+ - Space Name: `llm-app`
+ - License: `Openrail`
+ - Select the Space SDK: `Docker`
+ - Docker Template: `Blank`
+ - Space Hardware: `CPU basic - 2 vCPU - 16 GB - Free`
+ - Repo type: `Public`
+
+ <p align = "center" draggable="false">
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/8f16afd1-6b46-4d9f-b642-8fefe355c5c9">
+ </p>
+
+ 3. You should see something like this. We're now ready to send our files to the Space: clone the Space repo, move your project files (including the Dockerfile from this repo) into it, and push. You do NOT need to write a new Dockerfile. Make sure NOT to push your `.env` file; it is covered by `.gitignore` and should be excluded automatically.
+
+ <p align = "center" draggable="false">
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/cbf366e2-7613-4223-932a-72c67a73f9c6">
+ </p>
+
+ 4. After pushing all files, navigate to `Settings` in the top right to add your OpenAI API key.
+
+ <p align = "center" draggable="false">
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/a1123a6f-abdd-4f76-bea4-39acf9928762">
+ </p>
+
+ 5. Scroll down to `Variables and secrets` and click on `New secret` in the top right.
+
+ <p align = "center" draggable="false">
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/a8a4a25d-752b-4036-b572-93381370c2db">
+ </p>
+
+ 6. Set the name to `OPENAI_API_KEY` and add your OpenAI key under `Value`. Click `Save`.
+
+ <p align = "center" draggable="false">
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/0a897538-1779-48ff-bcb4-486af30f7a14">
+ </p>
+
+ 7. To ensure your key is being used, we recommend you `Restart this Space`.
+
+ <p align = "center" draggable="false">
+ <img src="https://github.com/AI-Maker-Space/LLMOps-Dev-101/assets/37101144/fb1d83af-6ebe-4676-8bf5-b6d88f07c583">
+ </p>
+
+ 8. Congratulations! You just deployed your first LLM! 🚀🚀🚀 Get on LinkedIn and post your results and experience! Make sure to tag us with #AIMakerspace!
+
+ Here's a template to get your post started!
+
+ ```
+ 🚀🎉 Exciting News! 🎉🚀
+
+ 🏗️ Today, I'm thrilled to announce that I've successfully built and shipped my first-ever LLM using the powerful combination of Chainlit, Docker, and the OpenAI API! 🖥️
+
+ Check it out 👇
+ [LINK TO APP]
+
+ A big shoutout to @**AI Makerspace** for making this all possible. Couldn't have done it without the incredible community there. 🤗🙏
+
+ Looking forward to building with the community! 🙌✨ Here's to many more creations ahead! 🥂🎉
+
+ Who else is diving into the world of AI? Let's connect! 🌐💡
+
+ #FirstLLM #Chainlit #Docker #OpenAI #AIMakerspace
+ ```
+
+ </details>
+
+ <p></p>
+
+ ### That's it for now! And so it begins.... :)
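
Step 4 of the walkthrough above depends on `OPENAI_API_KEY` being readable from `.env`, which is exactly how `app.py` below picks it up via `load_dotenv()`. A quick check you can run from the project root before `chainlit run app.py -w`; this helper is hypothetical and not part of the commit:

```python
# Verify the key from .env is visible the same way app.py will see it.
# Hypothetical helper script, not part of the repository.
import os

from dotenv import load_dotenv  # pinned in requirements.txt as python-dotenv==1.0.0

load_dotenv()  # reads .env from the current directory

key = os.getenv("OPENAI_API_KEY")
if key and key.startswith("sk-"):  # the "sk-" prefix check is only a heuristic
    print("OPENAI_API_KEY loaded, ending in", key[-4:])
else:
    raise SystemExit("OPENAI_API_KEY missing; see step 4 of the README above")
```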
app.py ADDED
@@ -0,0 +1,80 @@
+ # You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python)
+
+ # OpenAI Chat completion
+ import os
+ from openai import AsyncOpenAI  # importing openai for API usage
+ import chainlit as cl  # importing chainlit for our app
+ from chainlit.prompt import Prompt, PromptMessage  # importing prompt tools
+ from chainlit.playground.providers import ChatOpenAI  # importing ChatOpenAI tools
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ # ChatOpenAI Templates
+ system_template = """You are a helpful assistant who always speaks in a pleasant tone!
+ """
+
+ user_template = """{input}
+ Think through your response step by step.
+ """
+
+
+ @cl.on_chat_start  # marks a function that will be executed at the start of a user session
+ async def start_chat():
+     settings = {
+         "model": "gpt-3.5-turbo",
+         "temperature": 0,
+         "max_tokens": 500,
+         "top_p": 1,
+         "frequency_penalty": 0,
+         "presence_penalty": 0,
+     }
+
+     cl.user_session.set("settings", settings)
+
+
+ @cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
+ async def main(message: cl.Message):
+     settings = cl.user_session.get("settings")
+
+     client = AsyncOpenAI()
+
+     print(message.content)
+
+     prompt = Prompt(
+         provider=ChatOpenAI.id,
+         messages=[
+             PromptMessage(
+                 role="system",
+                 template=system_template,
+                 formatted=system_template,
+             ),
+             PromptMessage(
+                 role="user",
+                 template=user_template,
+                 formatted=user_template.format(input=message.content),
+             ),
+         ],
+         inputs={"input": message.content},
+         settings=settings,
+     )
+
+     print([m.to_openai() for m in prompt.messages])
+
+     msg = cl.Message(content="")
+
+     # Call OpenAI
+     async for stream_resp in await client.chat.completions.create(
+         messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
+     ):
+         token = stream_resp.choices[0].delta.content
+         if not token:
+             token = ""
+         await msg.stream_token(token)
+
+     # Update the prompt object with the completion
+     prompt.completion = msg.content
+     msg.prompt = prompt
+
+     # Send and close the message stream
+     await msg.send()
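
Under the Chainlit wrappers (`Prompt`, `PromptMessage`, `cl.Message`), the core of `main` is a single streaming chat-completion call. A standalone sketch of that same call, with the templates and settings copied from `app.py`, can be handy for testing from a terminal; it assumes `OPENAI_API_KEY` is set (for example via `.env`) and is not part of the commit:

```python
# Sketch: the same streaming call app.py makes, without the Chainlit wrappers.
# Assumes OPENAI_API_KEY is available in the environment (or in .env).
import asyncio

from dotenv import load_dotenv
from openai import AsyncOpenAI

load_dotenv()

system_template = "You are a helpful assistant who always speaks in a pleasant tone!"
user_template = "{input}\nThink through your response step by step."


async def ask(question: str) -> str:
    client = AsyncOpenAI()
    stream = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        temperature=0,
        max_tokens=500,
        stream=True,
        messages=[
            {"role": "system", "content": system_template},
            {"role": "user", "content": user_template.format(input=question)},
        ],
    )
    chunks = []
    async for part in stream:  # each chunk carries a token (or None) in delta.content
        token = part.choices[0].delta.content or ""
        print(token, end="", flush=True)
        chunks.append(token)
    print()
    return "".join(chunks)


if __name__ == "__main__":
    asyncio.run(ask("What is Chainlit?"))
```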
chainlit.md ADDED
@@ -0,0 +1,3 @@
+ # Beyond ChatGPT
+
+ This Chainlit app was created following the instructions from [this repository](https://github.com/AI-Maker-Space/Beyond-ChatGPT)!
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ chainlit==0.7.700
+ cohere==4.37
+ openai==1.3.5
+ tiktoken==0.5.1
+ python-dotenv==1.0.0
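
The pins matter here: `app.py` imports `chainlit.prompt` and `chainlit.playground.providers`, which ship with the 0.7.x release pinned above. A small sketch to confirm the installed versions match the pins (standard library only; not part of the commit):

```python
# Sanity check that the pinned versions are what's actually installed.
# importlib.metadata is in the standard library on Python 3.8+.
from importlib.metadata import PackageNotFoundError, version

PINS = {
    "chainlit": "0.7.700",
    "cohere": "4.37",
    "openai": "1.3.5",
    "tiktoken": "0.5.1",
    "python-dotenv": "1.0.0",
}

for package, expected in PINS.items():
    try:
        installed = version(package)
    except PackageNotFoundError:
        print(f"{package}: NOT INSTALLED (expected {expected})")
        continue
    status = "ok" if installed == expected else f"MISMATCH (expected {expected})"
    print(f"{package}: {installed} {status}")
```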