lucianotonet committed · Commit a0d84aa · 0 Parent(s)
.gitignore ADDED
@@ -0,0 +1,3 @@
+ __pycache__
+ .env
+
.vscode/launch.json ADDED
@@ -0,0 +1,16 @@
+ {
+     // Use IntelliSense to learn about possible attributes.
+     // Hover to view descriptions of existing attributes.
+     // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+     "version": "0.2.0",
+     "configurations": [
+         {
+             "name": "Python: Current File",
+             "type": "python",
+             "request": "launch",
+             "program": "${file}",
+             "console": "integratedTerminal",
+             "justMyCode": true
+         }
+     ]
+ }
.vscode/settings.json ADDED
@@ -0,0 +1,3 @@
+ {
+     "nuxt.isNuxtApp": false
+ }
Dockerfile ADDED
@@ -0,0 +1,21 @@
+ FROM python:3.11-slim
+
+ RUN pip install poetry==1.6.1
+
+ RUN poetry config virtualenvs.create false
+
+ WORKDIR /code
+
+ COPY ./pyproject.toml ./README.md ./poetry.lock* ./
+
+ COPY ./packages ./packages
+
+ RUN poetry install --no-interaction --no-ansi --no-root
+
+ COPY ./app ./app
+
+ RUN poetry install --no-interaction --no-ansi
+
+ EXPOSE 7860
+
+ CMD exec uvicorn app.server:app --host 0.0.0.0 --port 7860
Procfile ADDED
@@ -0,0 +1 @@
+ web: uvicorn app.server:app --host 0.0.0.0 --port $PORT
README.md ADDED
@@ -0,0 +1,79 @@
+ # langchain-server
+
+ ## Installation
+
+ Install the LangChain CLI if you haven't already:
+
+ ```bash
+ pip install -U langchain-cli
+ ```
+
+ ## Adding packages
+
+ ```bash
+ # add packages from
+ # https://github.com/langchain-ai/langchain/tree/master/templates
+ langchain app add $PROJECT_NAME
+
+ # add packages from a custom GitHub repo
+ langchain app add --repo $OWNER/$REPO
+ # or with a full git URL (other git providers are supported):
+ # langchain app add git+https://github.com/hwchase17/chain-of-verification
+
+ # with a custom API mount point (defaults to `/{package_name}`)
+ langchain app add $PROJECT_NAME --api_path=/my/custom/path/rag
+ ```
+
+ Note: packages are removed by their API path:
+
+ ```bash
+ langchain app remove my/custom/path/rag
+ ```
+
+ ## Setup LangSmith (Optional)
+ LangSmith helps you trace, monitor and debug LangChain applications.
+ LangSmith is currently in private beta; you can sign up [here](https://smith.langchain.com/).
+ If you don't have access, you can skip this section.
+
+
+ ```shell
+ export LANGCHAIN_TRACING_V2=true
+ export LANGCHAIN_API_KEY=<your-api-key>
+ export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
+ ```
+
+ ## Launch LangServe
+
+ ```bash
+ langchain serve
+ ```
+
+ ## Running in Docker
+
+ This project folder includes a Dockerfile that lets you easily build and host your LangServe app.
+
+ ### Building the Image
+
+ To build the image, simply run:
+
+ ```shell
+ docker build . -t my-langserve-app
+ ```
+
+ If you tag your image with something other than `my-langserve-app`,
+ note the tag for use in the next step.
+
+ ### Running the Image Locally
+
+ To run the image, you'll need to include any environment variables
+ necessary for your application.
+
+ In the example below, we inject the `OPENAI_API_KEY` environment
+ variable with the value set in your local environment
+ (`$OPENAI_API_KEY`).
+
+ We also publish the container's port 7860 (the port exposed in the Dockerfile) on host port 8080 with the `-p 8080:7860` option.
+
+ ```shell
+ docker run -e OPENAI_API_KEY=$OPENAI_API_KEY -p 8080:7860 my-langserve-app
+ ```
app/__init__.py ADDED
File without changes
app/server.py ADDED
@@ -0,0 +1,33 @@
+ from fastapi import FastAPI
+ from langserve import add_routes
+ from langchain.chat_models import ChatOpenAI
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from dotenv import load_dotenv
+
+ # Load API keys (e.g. OPENAI_API_KEY, GOOGLE_API_KEY) from the .env file
+ load_dotenv()
+
+ app = FastAPI(
+     title="LangChain Server",
+     version="1.0",
+     description="A simple API server using LangChain's Runnable interfaces",
+ )
+
+ # Expose an OpenAI chat model at /openai
+ add_routes(
+     app=app,
+     runnable=ChatOpenAI(model="gpt-4-1106-preview"),
+     path="/openai",
+ )
+
+ # Expose a Google Gemini chat model at /google
+ add_routes(
+     app=app,
+     runnable=ChatGoogleGenerativeAI(model="gemini-pro"),
+     path="/google",
+ )
+
+ if __name__ == "__main__":
+     import uvicorn
+
+     uvicorn.run(app, host="localhost", port=7860)
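For reference, the two routes registered above can also be called from Python with LangServe's `RemoteRunnable` client (the same helper used in the pirate-speak README below). A minimal sketch, assuming the server is running locally on port 7860 and the relevant API keys are present in `.env`:

```python
from langserve.client import RemoteRunnable

# Remote proxies for the routes registered in app/server.py.
openai_chat = RemoteRunnable("http://localhost:7860/openai")
gemini_chat = RemoteRunnable("http://localhost:7860/google")

# A plain string is accepted by the chat-model routes and treated as a human message.
print(openai_chat.invoke("Give me a one-line greeting."))
print(gemini_chat.invoke("Give me a one-line greeting."))
```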
packages/README.md ADDED
File without changes
packages/pirate-speak/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 LangChain, Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
packages/pirate-speak/README.md ADDED
@@ -0,0 +1,67 @@
+
+ # pirate-speak
+
+ This template converts user input into pirate speak.
+
+ ## Environment Setup
+
+ Set the `OPENAI_API_KEY` environment variable to access the OpenAI models.
+
+ ## Usage
+
+ To use this package, you should first have the LangChain CLI installed:
+
+ ```shell
+ pip install -U langchain-cli
+ ```
+
+ To create a new LangChain project and install this as the only package, you can do:
+
+ ```shell
+ langchain app new my-app --package pirate-speak
+ ```
+
+ If you want to add this to an existing project, you can just run:
+
+ ```shell
+ langchain app add pirate-speak
+ ```
+
+ And add the following code to your `server.py` file:
+ ```python
+ from pirate_speak.chain import chain as pirate_speak_chain
+
+ add_routes(app, pirate_speak_chain, path="/pirate-speak")
+ ```
+
+ (Optional) Let's now configure LangSmith.
+ LangSmith helps you trace, monitor and debug LangChain applications.
+ LangSmith is currently in private beta; you can sign up [here](https://smith.langchain.com/).
+ If you don't have access, you can skip this section.
+
+
+ ```shell
+ export LANGCHAIN_TRACING_V2=true
+ export LANGCHAIN_API_KEY=<your-api-key>
+ export LANGCHAIN_PROJECT=<your-project>  # if not specified, defaults to "default"
+ ```
+
+ If you are inside this directory, then you can spin up a LangServe instance directly by running:
+
+ ```shell
+ langchain serve
+ ```
+
+ This will start the FastAPI app, with the server running locally at
+ [http://localhost:8000](http://localhost:8000).
+
+ We can see all templates at [http://127.0.0.1:8000/docs](http://127.0.0.1:8000/docs).
+ We can access the playground at [http://127.0.0.1:8000/pirate-speak/playground](http://127.0.0.1:8000/pirate-speak/playground).
+
+ We can access the template from code with:
+
+ ```python
+ from langserve.client import RemoteRunnable
+
+ runnable = RemoteRunnable("http://localhost:8000/pirate-speak")
+ ```
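The snippet above stops at constructing the client. As an illustrative follow-up (assuming the `langchain serve` instance above is running on port 8000), the remote chain is invoked with the same `text` key the prompt template expects:

```python
from langserve.client import RemoteRunnable

runnable = RemoteRunnable("http://localhost:8000/pirate-speak")
# The pirate-speak prompt takes a single "text" variable.
print(runnable.invoke({"text": "Good morning, how are you?"}))
```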
packages/pirate-speak/pirate_speak/__init__.py ADDED
File without changes
packages/pirate-speak/pirate_speak/chain.py ADDED
@@ -0,0 +1,17 @@
+ from langchain.chat_models import ChatOpenAI
+ from langchain.prompts import ChatPromptTemplate
+
+ _prompt = ChatPromptTemplate.from_messages(
+     [
+         (
+             "system",
+             "Translate user input into pirate speak",
+         ),
+         ("human", "{text}"),
+     ]
+ )
+ _model = ChatOpenAI()
+
+ # if you update this, you MUST also update ../pyproject.toml
+ # with the new `tool.langserve.export_attr`
+ chain = _prompt | _model
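Since the prompt takes a single `text` variable, the exported chain can also be invoked directly for a quick local check, outside LangServe. A minimal sketch, assuming `OPENAI_API_KEY` is set:

```python
from pirate_speak.chain import chain

# The prompt template expects a "text" key, so the chain input is a dict.
result = chain.invoke({"text": "Good morning, how are you?"})
print(result.content)  # AIMessage content, translated into pirate speak
```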
packages/pirate-speak/poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
packages/pirate-speak/pyproject.toml ADDED
@@ -0,0 +1,32 @@
+ [tool.poetry]
+ name = "pirate-speak"
+ version = "0.0.1"
+ description = "Get started with a simple template that speaks like a pirate"
+ authors = []
+ readme = "README.md"
+
+ [tool.poetry.dependencies]
+ python = ">=3.8.1,<4.0"
+ langchain = ">=0.0.325"
+ openai = "<2"
+
+ [tool.poetry.group.dev.dependencies]
+ langchain-cli = ">=0.0.15"
+ fastapi = "^0.104.0"
+ sse-starlette = "^1.6.5"
+
+ [tool.langserve]
+ export_module = "pirate_speak.chain"
+ export_attr = "chain"
+
+ [tool.templates-hub]
+ use-case = "chatbot"
+ author = "LangChain"
+ integrations = ["OpenAI"]
+ tags = ["getting-started"]
+
+ [build-system]
+ requires = [
+     "poetry-core",
+ ]
+ build-backend = "poetry.core.masonry.api"
packages/pirate-speak/tests/__init__.py ADDED
File without changes
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,28 @@
+ [tool.poetry]
+ name = "langchain-server"
+ version = "0.1.0"
+ description = ""
+ authors = ["Your Name <[email protected]>"]
+ readme = "README.md"
+ packages = [
+     { include = "app" },
+ ]
+
+ [tool.poetry.dependencies]
+ python = "^3.11"
+ uvicorn = "^0.23.2"
+ langserve = {extras = ["server"], version = ">=0.0.30"}
+ pydantic = "<2"
+ pirate-speak = {path = "packages/pirate-speak", develop = true}  # forward slashes keep the path valid in the Linux Docker build
+ python-dotenv = "^1.0.0"
+ langchain-google-genai = "^0.0.5"
+ pillow = "^10.1.0"
+ fastapi = "^0.105.0"
+
+
+ [tool.poetry.group.dev.dependencies]
+ langchain-cli = ">=0.0.15"
+
+ [build-system]
+ requires = ["poetry-core"]
+ build-backend = "poetry.core.masonry.api"
runtime.txt ADDED
@@ -0,0 +1 @@
+ python-3.11