远兮 committed on
Commit
26947dd
·
1 Parent(s): bd50541

add langserve

Browse files
agent_chat_memory.ipynb CHANGED
@@ -229,7 +229,7 @@
229
  "name": "python",
230
  "nbconvert_exporter": "python",
231
  "pygments_lexer": "ipython3",
232
- "version": "3.10.11"
233
  },
234
  "orig_nbformat": 4
235
  },
 
229
  "name": "python",
230
  "nbconvert_exporter": "python",
231
  "pygments_lexer": "ipython3",
232
+ "version": "3.11.4"
233
  },
234
  "orig_nbformat": 4
235
  },
langserve/demo.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ from typing import List
3
+
4
+ from fastapi import FastAPI
5
+ from langchain_core.prompts import ChatPromptTemplate
6
+ from langchain_openai import ChatOpenAI
7
+ from langchain_community.document_loaders import WebBaseLoader
8
+ from langchain_openai import OpenAIEmbeddings
9
+ from langchain_community.vectorstores import FAISS
10
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
11
+ from langchain.tools.retriever import create_retriever_tool
12
+ from langchain_community.tools.tavily_search import TavilySearchResults
13
+ from langchain_openai import ChatOpenAI
14
+ from langchain import hub
15
+ from langchain.agents import create_openai_functions_agent
16
+ from langchain.agents import AgentExecutor
17
+ from langchain.pydantic_v1 import BaseModel, Field
18
+ from langchain_core.messages import BaseMessage
19
+ from langserve import add_routes
20
+ from langchain.utilities import SerpAPIWrapper
21
+
22
+ # 1. Load Retriever
23
+ loader = WebBaseLoader("https://docs.smith.langchain.com")
24
+ docs = loader.load()
25
+ text_splitter = RecursiveCharacterTextSplitter()
26
+ documents = text_splitter.split_documents(docs)
27
+ embeddings = OpenAIEmbeddings()
28
+ vector = FAISS.from_documents(documents, embeddings)
29
+ retriever = vector.as_retriever()
30
+
31
+ # 2. Create Tools
32
+ retriever_tool = create_retriever_tool(
33
+ retriever,
34
+ "langsmith_search",
35
+ "Search for information about LangSmith. For any questions about LangSmith, you must use this tool!",
36
+ )
37
+ search = SerpAPIWrapper()
38
+ tools = [retriever_tool, search]
39
+
40
+ # 3. Create Agent
41
+ prompt = hub.pull("hwchase17/openai-functions-agent")
42
+ llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
43
+ agent = create_openai_functions_agent(llm, tools, prompt)
44
+ agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
45
+
46
+
47
+ # 4. App definition
48
+ app = FastAPI(
49
+ title="LangChain Server",
50
+ version="1.0",
51
+ description="A simple API server using LangChain's Runnable interfaces",
52
+ )
53
+
54
+ # 5. Adding chain route
55
+
56
+ # We need to add these input/output schemas because the current AgentExecutor
57
+ # is lacking in schemas.
58
+
59
+ class Input(BaseModel):
60
+ input: str
61
+ chat_history: List[BaseMessage] = Field(
62
+ ...,
63
+ extra={"widget": {"type": "chat", "input": "location"}},
64
+ )
65
+
66
+
67
+ class Output(BaseModel):
68
+ output: str
69
+
70
+ add_routes(
71
+ app,
72
+ agent_executor.with_types(input_type=Input, output_type=Output),
73
+ path="/agent",
74
+ )
75
+
76
+ if __name__ == "__main__":
77
+ import uvicorn
78
+
79
+ uvicorn.run(app, host="localhost", port=8000)
new/chat_history.ipynb ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "from langchain_openai import ChatOpenAI\n",
10
+ "llm = ChatOpenAI()"
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "code",
15
+ "execution_count": 2,
16
+ "metadata": {},
17
+ "outputs": [],
18
+ "source": [
19
+ "from langchain_community.document_loaders import WebBaseLoader\n",
20
+ "loader = WebBaseLoader(\"https://docs.smith.langchain.com\")\n",
21
+ "\n",
22
+ "docs = loader.load()"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": 3,
28
+ "metadata": {},
29
+ "outputs": [],
30
+ "source": [
31
+ "from langchain_openai import OpenAIEmbeddings\n",
32
+ "\n",
33
+ "embeddings = OpenAIEmbeddings()"
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "code",
38
+ "execution_count": 4,
39
+ "metadata": {},
40
+ "outputs": [],
41
+ "source": [
42
+ "from langchain_community.vectorstores import FAISS\n",
43
+ "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
44
+ "\n",
45
+ "\n",
46
+ "text_splitter = RecursiveCharacterTextSplitter()\n",
47
+ "documents = text_splitter.split_documents(docs)\n",
48
+ "vector = FAISS.from_documents(documents, embeddings)"
49
+ ]
50
+ },
51
+ {
52
+ "cell_type": "code",
53
+ "execution_count": 5,
54
+ "metadata": {},
55
+ "outputs": [],
56
+ "source": [
57
+ "from langchain.chains import create_history_aware_retriever\n",
58
+ "from langchain_core.prompts import MessagesPlaceholder\n",
59
+ "from langchain_core.prompts import ChatPromptTemplate\n",
60
+ "\n",
61
+ "# First we need a prompt that we can pass into an LLM to generate this search query\n",
62
+ "\n",
63
+ "prompt = ChatPromptTemplate.from_messages([\n",
64
+ " MessagesPlaceholder(variable_name=\"chat_history\"),\n",
65
+ " (\"user\", \"{input}\"),\n",
66
+ " (\"user\", \"Given the above conversation, generate a search query to look up in order to get information relevant to the conversation\")\n",
67
+ "])\n",
68
+ "retriever_chain = create_history_aware_retriever(llm, vector.as_retriever(), prompt)"
69
+ ]
70
+ },
71
+ {
72
+ "cell_type": "code",
73
+ "execution_count": 6,
74
+ "metadata": {},
75
+ "outputs": [
76
+ {
77
+ "data": {
78
+ "text/plain": [
79
+ "[Document(page_content=\"LangSmith | 🦜ï¸�ğŸ›\\xa0ï¸� LangSmith\\n\\n\\n\\n\\n\\nSkip to main content🦜ï¸�ğŸ›\\xa0ï¸� LangSmith DocsLangChain Python DocsLangChain JS/TS DocsLangSmith API DocsSearchGo to AppLangSmithUser GuideSetupPricing (Coming Soon)Self-HostingTracingEvaluationMonitoringPrompt HubLangSmithOn this pageLangSmithIntroduction​LangSmith is a platform for building production-grade LLM applications.It lets you debug, test, evaluate, and monitor chains and intelligent agents built on any LLM framework and seamlessly integrates with LangChain, the go-to open source framework for building with LLMs.LangSmith is developed by LangChain, the company behind the open source LangChain framework.Quick Start​Tracing: Get started with the tracing quick start.Evaluation: Get started with the evaluation quick start.Next Steps​Check out the following sections to learn more about LangSmith:User Guide: Learn about the workflows LangSmith supports at each stage of the LLM application lifecycle.Setup: Learn how to create an account, obtain an API key, and configure your environment.Pricing: Learn about the pricing model for LangSmith.Self-Hosting: Learn about self-hosting options for LangSmith.Tracing: Learn about the tracing capabilities of LangSmith.Evaluation: Learn about the evaluation capabilities of LangSmith.Prompt Hub Learn about the Prompt Hub, a prompt management tool built into LangSmith.Additional Resources​LangSmith Cookbook: A collection of tutorials and end-to-end walkthroughs using LangSmith.LangChain Python: Docs for the Python LangChain library.LangChain Python API Reference: documentation to review the core APIs of LangChain.LangChain JS: Docs for the TypeScript LangChain libraryDiscord: Join us on our Discord to discuss all things LangChain!Contact SalesIf you're interested in enterprise security and admin features, special deployment options, or access for large teams, reach out to speak with sales.NextUser GuideIntroductionQuick StartNext 
StepsAdditional ResourcesCommunityDiscordTwitterGitHubDocs CodeLangSmith SDKPythonJS/TSMoreHomepageBlogCopyright © 2024 LangChain, Inc.\", metadata={'source': 'https://docs.smith.langchain.com', 'title': 'LangSmith | 🦜ï¸�ğŸ›\\xa0ï¸� LangSmith', 'description': 'Introduction', 'language': 'en'})]"
80
+ ]
81
+ },
82
+ "execution_count": 6,
83
+ "metadata": {},
84
+ "output_type": "execute_result"
85
+ }
86
+ ],
87
+ "source": [
88
+ "from langchain_core.messages import HumanMessage, AIMessage\n",
89
+ "\n",
90
+ "chat_history = [HumanMessage(content=\"Can LangSmith help test my LLM applications?\"), AIMessage(content=\"Yes!\")]\n",
91
+ "retriever_chain.invoke({\n",
92
+ " \"chat_history\": chat_history,\n",
93
+ " \"input\": \"Tell me how\"\n",
94
+ "})"
95
+ ]
96
+ }
97
+ ],
98
+ "metadata": {
99
+ "kernelspec": {
100
+ "display_name": "base",
101
+ "language": "python",
102
+ "name": "python3"
103
+ },
104
+ "language_info": {
105
+ "codemirror_mode": {
106
+ "name": "ipython",
107
+ "version": 3
108
+ },
109
+ "file_extension": ".py",
110
+ "mimetype": "text/x-python",
111
+ "name": "python",
112
+ "nbconvert_exporter": "python",
113
+ "pygments_lexer": "ipython3",
114
+ "version": "3.11.4"
115
+ }
116
+ },
117
+ "nbformat": 4,
118
+ "nbformat_minor": 2
119
+ }
new/test.ipynb ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 19,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "from langchain_openai import ChatOpenAI\n",
10
+ "llm = ChatOpenAI()"
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "code",
15
+ "execution_count": 20,
16
+ "metadata": {},
17
+ "outputs": [],
18
+ "source": [
19
+ "from langchain.globals import set_verbose\n",
20
+ "set_verbose(True)\n"
21
+ ]
22
+ },
23
+ {
24
+ "cell_type": "code",
25
+ "execution_count": 21,
26
+ "metadata": {},
27
+ "outputs": [
28
+ {
29
+ "data": {
30
+ "text/plain": [
31
+ "AIMessage(content='Langsmith can help with testing in the following ways:\\n\\n1. Automated testing: Langsmith can help create automated test scripts that can be run repeatedly to check for bugs and errors in the software.\\n\\n2. Test case management: Langsmith can help manage and organize test cases, ensuring that all aspects of the software are properly tested.\\n\\n3. Performance testing: Langsmith can help with performance testing to ensure that the software meets the required performance standards.\\n\\n4. Security testing: Langsmith can help with security testing to identify vulnerabilities in the software and ensure that sensitive data is protected.\\n\\n5. Regression testing: Langsmith can help with regression testing to ensure that new code changes do not negatively impact existing functionality.\\n\\nOverall, Langsmith can provide expertise and tools to streamline the testing process and ensure that the software meets quality standards before release.')"
32
+ ]
33
+ },
34
+ "execution_count": 21,
35
+ "metadata": {},
36
+ "output_type": "execute_result"
37
+ }
38
+ ],
39
+ "source": [
40
+ "llm.invoke(\"how can langsmith help with testing?\")"
41
+ ]
42
+ },
43
+ {
44
+ "cell_type": "code",
45
+ "execution_count": 22,
46
+ "metadata": {},
47
+ "outputs": [],
48
+ "source": [
49
+ "from langchain_core.prompts import ChatPromptTemplate\n",
50
+ "prompt = ChatPromptTemplate.from_messages([\n",
51
+ " (\"system\", \"You are world class technical documentation writer.\"),\n",
52
+ " (\"user\", \"{input}\")\n",
53
+ "])"
54
+ ]
55
+ },
56
+ {
57
+ "cell_type": "code",
58
+ "execution_count": 23,
59
+ "metadata": {},
60
+ "outputs": [],
61
+ "source": [
62
+ "from langchain_core.output_parsers import StrOutputParser\n",
63
+ "\n",
64
+ "output_parser = StrOutputParser()"
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "code",
69
+ "execution_count": 24,
70
+ "metadata": {},
71
+ "outputs": [
72
+ {
73
+ "data": {
74
+ "text/plain": [
75
+ "\"Langsmith, an advanced language generation model, can greatly assist with testing in various ways:\\n\\n1. Test Case Generation: Langsmith can generate a wide range of test cases automatically based on the given requirements or specifications. It can create diverse inputs, edge cases, and boundary conditions to ensure thorough test coverage.\\n\\n2. Test Data Generation: Langsmith can generate realistic and varied test data sets for testing different scenarios and conditions. This can help in validating the system's performance, scalability, and robustness.\\n\\n3. Test Scenario Simulation: Langsmith can simulate different test scenarios by generating natural language descriptions of user interactions, system responses, and expected outcomes. This can assist in creating comprehensive test scripts and scenarios.\\n\\n4. Test Report Generation: Langsmith can help in automating the generation of test reports by summarizing test results, highlighting issues, and providing insights into the overall test coverage and quality.\\n\\n5. Test Documentation: Langsmith can assist in creating detailed test documentation by generating test plans, test cases, test scripts, and other related materials in a clear and structured manner.\\n\\nOverall, Langsmith can streamline the testing process, improve test coverage, and enhance the quality of testing activities by leveraging its natural language generation capabilities.\""
76
+ ]
77
+ },
78
+ "execution_count": 24,
79
+ "metadata": {},
80
+ "output_type": "execute_result"
81
+ }
82
+ ],
83
+ "source": [
84
+ "chain = prompt | llm | output_parser\n",
85
+ "\n",
86
+ "chain.invoke({\"input\": \"how can langsmith help with testing?\"})"
87
+ ]
88
+ }
89
+ ],
90
+ "metadata": {
91
+ "kernelspec": {
92
+ "display_name": "base",
93
+ "language": "python",
94
+ "name": "python3"
95
+ },
96
+ "language_info": {
97
+ "codemirror_mode": {
98
+ "name": "ipython",
99
+ "version": 3
100
+ },
101
+ "file_extension": ".py",
102
+ "mimetype": "text/x-python",
103
+ "name": "python",
104
+ "nbconvert_exporter": "python",
105
+ "pygments_lexer": "ipython3",
106
+ "version": "3.11.4"
107
+ }
108
+ },
109
+ "nbformat": 4,
110
+ "nbformat_minor": 2
111
+ }
new/test.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_openai import ChatOpenAI
2
+ llm = ChatOpenAI()
3
+
4
+ from langchain_core.prompts import ChatPromptTemplate
5
+ prompt = ChatPromptTemplate.from_messages([
6
+ ("system", "You are world class technical documentation writer."),
7
+ ("user", "{input}")
8
+ ])
9
+
10
+ from langchain_core.output_parsers import StrOutputParser
11
+
12
+ output_parser = StrOutputParser()
13
+
14
+ chain = prompt | llm | output_parser
15
+
16
+ chain.invoke({"input": "how can langsmith help with testing?"})