Spaces:
Runtime error
Runtime error
远兮
commited on
Commit
·
2ef1d57
1
Parent(s):
dd85bee
add chain
Browse files- README.md +1 -0
- __pycache__/chain_constitutional_ai_cn.cpython-310.pyc +0 -0
- __pycache__/chain_constitutional_prompts_cn.cpython-310.pyc +0 -0
- chain_api.ipynb +84 -0
- chain_bash.ipynb +73 -0
- chain_checker.ipynb +330 -0
- chain_constitutional.ipynb +166 -0
- chain_constitutional_prompts_cn.py +91 -0
- chain_load_json.ipynb +78 -0
- chain_math.ipynb +109 -0
- chain_moderation.ipynb +144 -0
- chain_request_html.ipynb +101 -0
- chain_save_json.ipynb +86 -0
- chain_summarize_map_reduce.ipynb +111 -0
- chain_transform.ipynb +63 -0
- index_url_loader.ipynb +9 -7
- memory_kg.ipynb +187 -0
- memory_predict_with_history.ipynb +154 -0
- memory_start.ipynb +54 -0
- memory_summary_buffer.ipynb +152 -0
- retriever_chatgpt.ipynb +115 -0
README.md
CHANGED
@@ -49,5 +49,6 @@ TODO:
|
|
49 |
4.PromptTemplate,可以有多个input/output吗,怎么使用?参见llms_sequential_chain.ipynb
|
50 |
5.看一下OpenAI的Embding Vector。
|
51 |
6.优先重点看下HuggingFace DataSet数据集。https://huggingface.co/docs/datasets/quickstart
|
|
|
52 |
|
53 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
49 |
4.PromptTemplate,可以有多个input/output吗,怎么使用?参见llms_sequential_chain.ipynb
|
50 |
5.看一下OpenAI的Embding Vector。
|
51 |
6.优先重点看下HuggingFace DataSet数据集。https://huggingface.co/docs/datasets/quickstart
|
52 |
+
7.看一下LangChainHub,有哪些chain,https://github.com/hwchase17/langchain-hub
|
53 |
|
54 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
__pycache__/chain_constitutional_ai_cn.cpython-310.pyc
ADDED
Binary file (154 Bytes). View file
|
|
__pycache__/chain_constitutional_prompts_cn.cpython-310.pyc
ADDED
Binary file (5.7 kB). View file
|
|
chain_api.ipynb
ADDED
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 23,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"from langchain.chains import APIChain\n",
|
10 |
+
"from langchain.prompts.prompt import PromptTemplate\n",
|
11 |
+
"\n",
|
12 |
+
"\n",
|
13 |
+
"from langchain.llms import OpenAI\n",
|
14 |
+
"\n",
|
15 |
+
"llm = OpenAI(temperature=0)"
|
16 |
+
]
|
17 |
+
},
|
18 |
+
{
|
19 |
+
"cell_type": "code",
|
20 |
+
"execution_count": 24,
|
21 |
+
"metadata": {},
|
22 |
+
"outputs": [],
|
23 |
+
"source": [
|
24 |
+
"from langchain.chains.api import open_meteo_docs\n",
|
25 |
+
"chain_new = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS, verbose=True)"
|
26 |
+
]
|
27 |
+
},
|
28 |
+
{
|
29 |
+
"cell_type": "code",
|
30 |
+
"execution_count": 25,
|
31 |
+
"metadata": {},
|
32 |
+
"outputs": [
|
33 |
+
{
|
34 |
+
"name": "stdout",
|
35 |
+
"output_type": "stream",
|
36 |
+
"text": [
|
37 |
+
"\n",
|
38 |
+
"\n",
|
39 |
+
"\u001b[1m> Entering new APIChain chain...\u001b[0m\n",
|
40 |
+
"\u001b[32;1m\u001b[1;3mhttps://api.open-meteo.com/v1/forecast?latitude=39.9042&longitude=116.4074&hourly=temperature_2m&temperature_unit=celsius&timezone=auto\u001b[0m\n",
|
41 |
+
"\u001b[33;1m\u001b[1;3m{\"latitude\":39.875,\"longitude\":116.375,\"generationtime_ms\":0.6901025772094727,\"utc_offset_seconds\":28800,\"timezone\":\"Asia/Shanghai\",\"timezone_abbreviation\":\"CST\",\"elevation\":47.0,\"hourly_units\":{\"time\":\"iso8601\",\"temperature_2m\":\"°C\"},\"hourly\":{\"time\":[\"2023-05-04T00:00\",\"2023-05-04T01:00\",\"2023-05-04T02:00\",\"2023-05-04T03:00\",\"2023-05-04T04:00\",\"2023-05-04T05:00\",\"2023-05-04T06:00\",\"2023-05-04T07:00\",\"2023-05-04T08:00\",\"2023-05-04T09:00\",\"2023-05-04T10:00\",\"2023-05-04T11:00\",\"2023-05-04T12:00\",\"2023-05-04T13:00\",\"2023-05-04T14:00\",\"2023-05-04T15:00\",\"2023-05-04T16:00\",\"2023-05-04T17:00\",\"2023-05-04T18:00\",\"2023-05-04T19:00\",\"2023-05-04T20:00\",\"2023-05-04T21:00\",\"2023-05-04T22:00\",\"2023-05-04T23:00\",\"2023-05-05T00:00\",\"2023-05-05T01:00\",\"2023-05-05T02:00\",\"2023-05-05T03:00\",\"2023-05-05T04:00\",\"2023-05-05T05:00\",\"2023-05-05T06:00\",\"2023-05-05T07:00\",\"2023-05-05T08:00\",\"2023-05-05T09:00\",\"2023-05-05T10:00\",\"2023-05-05T11:00\",\"2023-05-05T12:00\",\"2023-05-05T13:00\",\"2023-05-05T14:00\",\"2023-05-05T15:00\",\"2023-05-05T16:00\",\"2023-05-05T17:00\",\"2023-05-05T18:00\",\"2023-05-05T19:00\",\"2023-05-05T20:00\",\"2023-05-05T21:00\",\"2023-05-05T22:00\",\"2023-05-05T23:00\",\"2023-05-06T00:00\",\"2023-05-06T01:00\",\"2023-05-06T02:00\",\"2023-05-06T03:00\",\"2023-05-06T04:00\",\"2023-05-06T05:00\",\"2023-05-06T06:00\",\"2023-05-06T07:00\",\"2023-05-06T08:00\",\"2023-05-06T09:00\",\"2023-05-06T10:00\",\"2023-05-06T11:00\",\"2023-05-06T12:00\",\"2023-05-06T13:00\",\"2023-05-06T14:00\",\"2023-05-06T15:00\",\"2023-05-06T16:00\",\"2023-05-06T17:00\",\"2023-05-06T18:00\",\"2023-05-06T19:00\",\"2023-05-06T20:00\",\"2023-05-06T21:00\",\"2023-05-06T22:00\",\"2023-05-06T23:00\",\"2023-05-07T00:00\",\"2023-05-07T01:00\",\"2023-05-07T02:00\",\"2023-05-07T03:00\",\"2023-05-07T04:00\",\"2023-05-07T05:00\",\"2023-05-07T06:00\",\"2023-05-07T07:00\",\"2023-05-0
7T08:00\",\"2023-05-07T09:00\",\"2023-05-07T10:00\",\"2023-05-07T11:00\",\"2023-05-07T12:00\",\"2023-05-07T13:00\",\"2023-05-07T14:00\",\"2023-05-07T15:00\",\"2023-05-07T16:00\",\"2023-05-07T17:00\",\"2023-05-07T18:00\",\"2023-05-07T19:00\",\"2023-05-07T20:00\",\"2023-05-07T21:00\",\"2023-05-07T22:00\",\"2023-05-07T23:00\",\"2023-05-08T00:00\",\"2023-05-08T01:00\",\"2023-05-08T02:00\",\"2023-05-08T03:00\",\"2023-05-08T04:00\",\"2023-05-08T05:00\",\"2023-05-08T06:00\",\"2023-05-08T07:00\",\"2023-05-08T08:00\",\"2023-05-08T09:00\",\"2023-05-08T10:00\",\"2023-05-08T11:00\",\"2023-05-08T12:00\",\"2023-05-08T13:00\",\"2023-05-08T14:00\",\"2023-05-08T15:00\",\"2023-05-08T16:00\",\"2023-05-08T17:00\",\"2023-05-08T18:00\",\"2023-05-08T19:00\",\"2023-05-08T20:00\",\"2023-05-08T21:00\",\"2023-05-08T22:00\",\"2023-05-08T23:00\",\"2023-05-09T00:00\",\"2023-05-09T01:00\",\"2023-05-09T02:00\",\"2023-05-09T03:00\",\"2023-05-09T04:00\",\"2023-05-09T05:00\",\"2023-05-09T06:00\",\"2023-05-09T07:00\",\"2023-05-09T08:00\",\"2023-05-09T09:00\",\"2023-05-09T10:00\",\"2023-05-09T11:00\",\"2023-05-09T12:00\",\"2023-05-09T13:00\",\"2023-05-09T14:00\",\"2023-05-09T15:00\",\"2023-05-09T16:00\",\"2023-05-09T17:00\",\"2023-05-09T18:00\",\"2023-05-09T19:00\",\"2023-05-09T20:00\",\"2023-05-09T21:00\",\"2023-05-09T22:00\",\"2023-05-09T23:00\",\"2023-05-10T00:00\",\"2023-05-10T01:00\",\"2023-05-10T02:00\",\"2023-05-10T03:00\",\"2023-05-10T04:00\",\"2023-05-10T05:00\",\"2023-05-10T06:00\",\"2023-05-10T07:00\",\"2023-05-10T08:00\",\"2023-05-10T09:00\",\"2023-05-10T10:00\",\"2023-05-10T11:00\",\"2023-05-10T12:00\",\"2023-05-10T13:00\",\"2023-05-10T14:00\",\"2023-05-10T15:00\",\"2023-05-10T16:00\",\"2023-05-10T17:00\",\"2023-05-10T18:00\",\"2023-05-10T19:00\",\"2023-05-10T20:00\",\"2023-05-10T21:00\",\"2023-05-10T22:00\",\"2023-05-10T23:00\"],\"temperature_2m\":[18.3,17.9,17.7,17.5,17.4,17.3,16.9,16.8,17.2,18.5,19.2,20.0,21.2,21.7,19.3,18.2,17.9,17.8,17.4,17.1,17.0,16.9,16.8,16.8,16.8,16.9,16.9,17.0,17
.2,17.6,18.2,18.8,18.8,19.2,19.4,19.6,20.4,20.6,21.0,21.0,21.1,20.9,20.4,19.6,18.8,18.3,17.9,17.6,17.3,16.8,16.4,16.1,15.9,15.6,15.4,16.2,17.7,19.7,21.2,22.2,22.6,23.2,23.9,24.2,24.2,24.0,23.4,22.5,21.4,20.7,20.3,19.7,19.0,18.7,18.4,17.8,17.1,16.4,16.0,16.7,17.8,20.2,22.5,23.8,24.7,25.2,25.1,25.1,24.9,24.2,23.4,22.3,20.8,19.8,18.7,17.5,16.9,16.5,16.1,15.7,15.4,15.3,15.4,15.7,16.8,18.6,21.0,23.7,25.0,25.9,26.6,26.6,26.3,25.5,24.7,23.6,22.2,21.3,20.6,19.7,19.2,18.8,18.2,17.7,17.1,16.6,16.6,16.8,17.4,18.2,19.2,20.5,21.6,22.7,23.5,23.2,22.4,21.2,20.2,19.0,17.8,17.3,17.1,16.9,16.6,16.3,16.0,15.8,15.8,15.9,16.2,16.5,17.3,18.3,19.6,21.0,21.7,22.2,22.6,22.8,22.8,22.6,22.2,21.6,20.7,19.8,18.7,17.5]}}\u001b[0m\n",
|
42 |
+
"\n",
|
43 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
44 |
+
]
|
45 |
+
},
|
46 |
+
{
|
47 |
+
"data": {
|
48 |
+
"text/plain": [
|
49 |
+
"' The temperature in Beijing for the next 7 days will range from 15.3°C to 26.6°C.'"
|
50 |
+
]
|
51 |
+
},
|
52 |
+
"execution_count": 25,
|
53 |
+
"metadata": {},
|
54 |
+
"output_type": "execute_result"
|
55 |
+
}
|
56 |
+
],
|
57 |
+
"source": [
|
58 |
+
"chain_new.run('北京气温多少?')"
|
59 |
+
]
|
60 |
+
}
|
61 |
+
],
|
62 |
+
"metadata": {
|
63 |
+
"kernelspec": {
|
64 |
+
"display_name": "base",
|
65 |
+
"language": "python",
|
66 |
+
"name": "python3"
|
67 |
+
},
|
68 |
+
"language_info": {
|
69 |
+
"codemirror_mode": {
|
70 |
+
"name": "ipython",
|
71 |
+
"version": 3
|
72 |
+
},
|
73 |
+
"file_extension": ".py",
|
74 |
+
"mimetype": "text/x-python",
|
75 |
+
"name": "python",
|
76 |
+
"nbconvert_exporter": "python",
|
77 |
+
"pygments_lexer": "ipython3",
|
78 |
+
"version": "3.10.10"
|
79 |
+
},
|
80 |
+
"orig_nbformat": 4
|
81 |
+
},
|
82 |
+
"nbformat": 4,
|
83 |
+
"nbformat_minor": 2
|
84 |
+
}
|
chain_bash.ipynb
ADDED
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 2,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [
|
8 |
+
{
|
9 |
+
"name": "stdout",
|
10 |
+
"output_type": "stream",
|
11 |
+
"text": [
|
12 |
+
"\n",
|
13 |
+
"\n",
|
14 |
+
"\u001b[1m> Entering new LLMBashChain chain...\u001b[0m\n",
|
15 |
+
"重命名,把chain_bash.ipynb重命名为chain_bash_auto.ipynb\u001b[32;1m\u001b[1;3m\n",
|
16 |
+
"\n",
|
17 |
+
"```bash\n",
|
18 |
+
"mv chain_bash.ipynb chain_bash_auto.ipynb\n",
|
19 |
+
"```\u001b[0m\n",
|
20 |
+
"Code: \u001b[33;1m\u001b[1;3m['mv chain_bash.ipynb chain_bash_auto.ipynb']\u001b[0m\n",
|
21 |
+
"Answer: \u001b[33;1m\u001b[1;3m\u001b[0m\n",
|
22 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
23 |
+
]
|
24 |
+
},
|
25 |
+
{
|
26 |
+
"data": {
|
27 |
+
"text/plain": [
|
28 |
+
"''"
|
29 |
+
]
|
30 |
+
},
|
31 |
+
"execution_count": 2,
|
32 |
+
"metadata": {},
|
33 |
+
"output_type": "execute_result"
|
34 |
+
}
|
35 |
+
],
|
36 |
+
"source": [
|
37 |
+
"from langchain.chains import LLMBashChain\n",
|
38 |
+
"from langchain.llms import OpenAI\n",
|
39 |
+
"\n",
|
40 |
+
"llm = OpenAI(temperature=0)\n",
|
41 |
+
"\n",
|
42 |
+
"# text = \"查看当前目录下的文件列表,过滤出以chain开头的文件\"\n",
|
43 |
+
"text = \"重命名,把chain_bash.ipynb重命名为chain_bash_auto.ipynb\"\n",
|
44 |
+
"\n",
|
45 |
+
"bash_chain = LLMBashChain.from_llm(llm, verbose=True)\n",
|
46 |
+
"\n",
|
47 |
+
"bash_chain.run(text)"
|
48 |
+
]
|
49 |
+
}
|
50 |
+
],
|
51 |
+
"metadata": {
|
52 |
+
"kernelspec": {
|
53 |
+
"display_name": "base",
|
54 |
+
"language": "python",
|
55 |
+
"name": "python3"
|
56 |
+
},
|
57 |
+
"language_info": {
|
58 |
+
"codemirror_mode": {
|
59 |
+
"name": "ipython",
|
60 |
+
"version": 3
|
61 |
+
},
|
62 |
+
"file_extension": ".py",
|
63 |
+
"mimetype": "text/x-python",
|
64 |
+
"name": "python",
|
65 |
+
"nbconvert_exporter": "python",
|
66 |
+
"pygments_lexer": "ipython3",
|
67 |
+
"version": "3.10.10"
|
68 |
+
},
|
69 |
+
"orig_nbformat": 4
|
70 |
+
},
|
71 |
+
"nbformat": 4,
|
72 |
+
"nbformat_minor": 2
|
73 |
+
}
|
chain_checker.ipynb
ADDED
@@ -0,0 +1,330 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 4,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [
|
8 |
+
{
|
9 |
+
"name": "stdout",
|
10 |
+
"output_type": "stream",
|
11 |
+
"text": [
|
12 |
+
"\n",
|
13 |
+
"\n",
|
14 |
+
"\u001b[1m> Entering new LLMCheckerChain chain...\u001b[0m\n",
|
15 |
+
"\n",
|
16 |
+
"\n",
|
17 |
+
"\u001b[1m> Entering new SequentialChain chain...\u001b[0m\n",
|
18 |
+
"\n",
|
19 |
+
"\u001b[1m> Finished chain.\u001b[0m\n",
|
20 |
+
"\n",
|
21 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
22 |
+
]
|
23 |
+
},
|
24 |
+
{
|
25 |
+
"data": {
|
26 |
+
"text/plain": [
|
27 |
+
"' 不,通常天空是藍色的,但有時候會有不同的顏色。'"
|
28 |
+
]
|
29 |
+
},
|
30 |
+
"execution_count": 4,
|
31 |
+
"metadata": {},
|
32 |
+
"output_type": "execute_result"
|
33 |
+
}
|
34 |
+
],
|
35 |
+
"source": [
|
36 |
+
"from langchain.chains import LLMCheckerChain\n",
|
37 |
+
"from langchain.llms import OpenAI\n",
|
38 |
+
"\n",
|
39 |
+
"llm = OpenAI(temperature=0.7)\n",
|
40 |
+
"\n",
|
41 |
+
"text = \"天空是黑色的?中文回答\"\n",
|
42 |
+
"\n",
|
43 |
+
"checker_chain = LLMCheckerChain.from_llm(llm, verbose=True)\n",
|
44 |
+
"\n",
|
45 |
+
"checker_chain.run(text)"
|
46 |
+
]
|
47 |
+
},
|
48 |
+
{
|
49 |
+
"cell_type": "code",
|
50 |
+
"execution_count": 5,
|
51 |
+
"metadata": {},
|
52 |
+
"outputs": [
|
53 |
+
{
|
54 |
+
"name": "stdout",
|
55 |
+
"output_type": "stream",
|
56 |
+
"text": [
|
57 |
+
"\n",
|
58 |
+
"\n",
|
59 |
+
"\u001b[1m> Entering new LLMSummarizationCheckerChain chain...\u001b[0m\n",
|
60 |
+
"\n",
|
61 |
+
"\n",
|
62 |
+
"\u001b[1m> Entering new SequentialChain chain...\u001b[0m\n",
|
63 |
+
"\n",
|
64 |
+
"\n",
|
65 |
+
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
66 |
+
"Prompt after formatting:\n",
|
67 |
+
"\u001b[32;1m\u001b[1;3mGiven some text, extract a list of facts from the text.\n",
|
68 |
+
"\n",
|
69 |
+
"Format your output as a bulleted list.\n",
|
70 |
+
"\n",
|
71 |
+
"Text:\n",
|
72 |
+
"\"\"\"\n",
|
73 |
+
"天空是黑色的?中文回答\n",
|
74 |
+
"\"\"\"\n",
|
75 |
+
"\n",
|
76 |
+
"Facts:\u001b[0m\n",
|
77 |
+
"\n",
|
78 |
+
"\u001b[1m> Finished chain.\u001b[0m\n",
|
79 |
+
"\n",
|
80 |
+
"\n",
|
81 |
+
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
82 |
+
"Prompt after formatting:\n",
|
83 |
+
"\u001b[32;1m\u001b[1;3mYou are an expert fact checker. You have been hired by a major news organization to fact check a very important story.\n",
|
84 |
+
"\n",
|
85 |
+
"Here is a bullet point list of facts:\n",
|
86 |
+
"\"\"\"\n",
|
87 |
+
"\n",
|
88 |
+
"- The sky is black.\n",
|
89 |
+
"- The answer is in Chinese.\n",
|
90 |
+
"\"\"\"\n",
|
91 |
+
"\n",
|
92 |
+
"For each fact, determine whether it is true or false about the subject. If you are unable to determine whether the fact is true or false, output \"Undetermined\".\n",
|
93 |
+
"If the fact is false, explain why.\n",
|
94 |
+
"\n",
|
95 |
+
"\u001b[0m\n",
|
96 |
+
"\n",
|
97 |
+
"\u001b[1m> Finished chain.\u001b[0m\n",
|
98 |
+
"\n",
|
99 |
+
"\n",
|
100 |
+
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
101 |
+
"Prompt after formatting:\n",
|
102 |
+
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true of false. If the answer is false, a suggestion is given for a correction.\n",
|
103 |
+
"\n",
|
104 |
+
"Checked Assertions:\n",
|
105 |
+
"\"\"\"\n",
|
106 |
+
"\n",
|
107 |
+
"- The sky is black: False. The sky is usually blue during the day and turns to shades of orange, pink, and purple at sunset. \n",
|
108 |
+
"- The answer is in Chinese: Undetermined. Without more information about the subject, it is impossible to determine whether the answer is in Chinese.\n",
|
109 |
+
"\"\"\"\n",
|
110 |
+
"\n",
|
111 |
+
"Original Summary:\n",
|
112 |
+
"\"\"\"\n",
|
113 |
+
"天空是黑色的?中文回答\n",
|
114 |
+
"\"\"\"\n",
|
115 |
+
"\n",
|
116 |
+
"Using these checked assertions, rewrite the original summary to be completely true.\n",
|
117 |
+
"\n",
|
118 |
+
"The output should have the same structure and formatting as the original summary.\n",
|
119 |
+
"\n",
|
120 |
+
"Summary:\u001b[0m\n",
|
121 |
+
"\n",
|
122 |
+
"\u001b[1m> Finished chain.\u001b[0m\n",
|
123 |
+
"\n",
|
124 |
+
"\n",
|
125 |
+
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
126 |
+
"Prompt after formatting:\n",
|
127 |
+
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true or false.\n",
|
128 |
+
"\n",
|
129 |
+
"If all of the assertions are true, return \"True\". If any of the assertions are false, return \"False\".\n",
|
130 |
+
"\n",
|
131 |
+
"Here are some examples:\n",
|
132 |
+
"===\n",
|
133 |
+
"\n",
|
134 |
+
"Checked Assertions: \"\"\"\n",
|
135 |
+
"- The sky is red: False\n",
|
136 |
+
"- Water is made of lava: False\n",
|
137 |
+
"- The sun is a star: True\n",
|
138 |
+
"\"\"\"\n",
|
139 |
+
"Result: False\n",
|
140 |
+
"\n",
|
141 |
+
"===\n",
|
142 |
+
"\n",
|
143 |
+
"Checked Assertions: \"\"\"\n",
|
144 |
+
"- The sky is blue: True\n",
|
145 |
+
"- Water is wet: True\n",
|
146 |
+
"- The sun is a star: True\n",
|
147 |
+
"\"\"\"\n",
|
148 |
+
"Result: True\n",
|
149 |
+
"\n",
|
150 |
+
"===\n",
|
151 |
+
"\n",
|
152 |
+
"Checked Assertions: \"\"\"\n",
|
153 |
+
"- The sky is blue - True\n",
|
154 |
+
"- Water is made of lava- False\n",
|
155 |
+
"- The sun is a star - True\n",
|
156 |
+
"\"\"\"\n",
|
157 |
+
"Result: False\n",
|
158 |
+
"\n",
|
159 |
+
"===\n",
|
160 |
+
"\n",
|
161 |
+
"Checked Assertions:\"\"\"\n",
|
162 |
+
"\n",
|
163 |
+
"- The sky is black: False. The sky is usually blue during the day and turns to shades of orange, pink, and purple at sunset. \n",
|
164 |
+
"- The answer is in Chinese: Undetermined. Without more information about the subject, it is impossible to determine whether the answer is in Chinese.\n",
|
165 |
+
"\"\"\"\n",
|
166 |
+
"Result:\u001b[0m\n",
|
167 |
+
"\n",
|
168 |
+
"\u001b[1m> Finished chain.\u001b[0m\n",
|
169 |
+
"\n",
|
170 |
+
"\u001b[1m> Finished chain.\u001b[0m\n",
|
171 |
+
"\n",
|
172 |
+
"天空通常在白天是藍色的,在日落時會轉變成橙色、粉紅色和紫色。沒有更多有關主題的資訊,無法確定答案是否是中文。\n",
|
173 |
+
"\n",
|
174 |
+
"\n",
|
175 |
+
"\u001b[1m> Entering new SequentialChain chain...\u001b[0m\n",
|
176 |
+
"\n",
|
177 |
+
"\n",
|
178 |
+
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
179 |
+
"Prompt after formatting:\n",
|
180 |
+
"\u001b[32;1m\u001b[1;3mGiven some text, extract a list of facts from the text.\n",
|
181 |
+
"\n",
|
182 |
+
"Format your output as a bulleted list.\n",
|
183 |
+
"\n",
|
184 |
+
"Text:\n",
|
185 |
+
"\"\"\"\n",
|
186 |
+
"\n",
|
187 |
+
"天空通常在白天是藍色的,在日落時會轉變成橙色、粉紅色和紫色。沒有更多有關主題的資訊,無法確定答案是否是中文。\n",
|
188 |
+
"\"\"\"\n",
|
189 |
+
"\n",
|
190 |
+
"Facts:\u001b[0m\n",
|
191 |
+
"\n",
|
192 |
+
"\u001b[1m> Finished chain.\u001b[0m\n",
|
193 |
+
"\n",
|
194 |
+
"\n",
|
195 |
+
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
196 |
+
"Prompt after formatting:\n",
|
197 |
+
"\u001b[32;1m\u001b[1;3mYou are an expert fact checker. You have been hired by a major news organization to fact check a very important story.\n",
|
198 |
+
"\n",
|
199 |
+
"Here is a bullet point list of facts:\n",
|
200 |
+
"\"\"\"\n",
|
201 |
+
"\n",
|
202 |
+
"- The sky is usually blue during the day.\n",
|
203 |
+
"- At sunset, the sky can change to orange, pink, and purple.\n",
|
204 |
+
"\"\"\"\n",
|
205 |
+
"\n",
|
206 |
+
"For each fact, determine whether it is true or false about the subject. If you are unable to determine whether the fact is true or false, output \"Undetermined\".\n",
|
207 |
+
"If the fact is false, explain why.\n",
|
208 |
+
"\n",
|
209 |
+
"\u001b[0m\n",
|
210 |
+
"\n",
|
211 |
+
"\u001b[1m> Finished chain.\u001b[0m\n",
|
212 |
+
"\n",
|
213 |
+
"\n",
|
214 |
+
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
215 |
+
"Prompt after formatting:\n",
|
216 |
+
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true of false. If the answer is false, a suggestion is given for a correction.\n",
|
217 |
+
"\n",
|
218 |
+
"Checked Assertions:\n",
|
219 |
+
"\"\"\"\n",
|
220 |
+
"\n",
|
221 |
+
"- The sky is usually blue during the day. True\n",
|
222 |
+
"- At sunset, the sky can change to orange, pink, and purple. True\n",
|
223 |
+
"\"\"\"\n",
|
224 |
+
"\n",
|
225 |
+
"Original Summary:\n",
|
226 |
+
"\"\"\"\n",
|
227 |
+
"\n",
|
228 |
+
"天空通常在白天是藍色的,在日落時會轉變成橙色、粉紅色和紫色。沒有更多有關主題的資訊,無法確定答案是否是中文。\n",
|
229 |
+
"\"\"\"\n",
|
230 |
+
"\n",
|
231 |
+
"Using these checked assertions, rewrite the original summary to be completely true.\n",
|
232 |
+
"\n",
|
233 |
+
"The output should have the same structure and formatting as the original summary.\n",
|
234 |
+
"\n",
|
235 |
+
"Summary:\u001b[0m\n",
|
236 |
+
"\n",
|
237 |
+
"\u001b[1m> Finished chain.\u001b[0m\n",
|
238 |
+
"\n",
|
239 |
+
"\n",
|
240 |
+
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
241 |
+
"Prompt after formatting:\n",
|
242 |
+
"\u001b[32;1m\u001b[1;3mBelow are some assertions that have been fact checked and are labeled as true or false.\n",
|
243 |
+
"\n",
|
244 |
+
"If all of the assertions are true, return \"True\". If any of the assertions are false, return \"False\".\n",
|
245 |
+
"\n",
|
246 |
+
"Here are some examples:\n",
|
247 |
+
"===\n",
|
248 |
+
"\n",
|
249 |
+
"Checked Assertions: \"\"\"\n",
|
250 |
+
"- The sky is red: False\n",
|
251 |
+
"- Water is made of lava: False\n",
|
252 |
+
"- The sun is a star: True\n",
|
253 |
+
"\"\"\"\n",
|
254 |
+
"Result: False\n",
|
255 |
+
"\n",
|
256 |
+
"===\n",
|
257 |
+
"\n",
|
258 |
+
"Checked Assertions: \"\"\"\n",
|
259 |
+
"- The sky is blue: True\n",
|
260 |
+
"- Water is wet: True\n",
|
261 |
+
"- The sun is a star: True\n",
|
262 |
+
"\"\"\"\n",
|
263 |
+
"Result: True\n",
|
264 |
+
"\n",
|
265 |
+
"===\n",
|
266 |
+
"\n",
|
267 |
+
"Checked Assertions: \"\"\"\n",
|
268 |
+
"- The sky is blue - True\n",
|
269 |
+
"- Water is made of lava- False\n",
|
270 |
+
"- The sun is a star - True\n",
|
271 |
+
"\"\"\"\n",
|
272 |
+
"Result: False\n",
|
273 |
+
"\n",
|
274 |
+
"===\n",
|
275 |
+
"\n",
|
276 |
+
"Checked Assertions:\"\"\"\n",
|
277 |
+
"\n",
|
278 |
+
"- The sky is usually blue during the day. True\n",
|
279 |
+
"- At sunset, the sky can change to orange, pink, and purple. True\n",
|
280 |
+
"\"\"\"\n",
|
281 |
+
"Result:\u001b[0m\n",
|
282 |
+
"\n",
|
283 |
+
"\u001b[1m> Finished chain.\u001b[0m\n",
|
284 |
+
"\n",
|
285 |
+
"\u001b[1m> Finished chain.\u001b[0m\n",
|
286 |
+
"\n",
|
287 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
288 |
+
]
|
289 |
+
},
|
290 |
+
{
|
291 |
+
"data": {
|
292 |
+
"text/plain": [
|
293 |
+
"'天空通常在白天是藍色的,在日落時會變成橙色、粉紅色和紫色。'"
|
294 |
+
]
|
295 |
+
},
|
296 |
+
"execution_count": 5,
|
297 |
+
"metadata": {},
|
298 |
+
"output_type": "execute_result"
|
299 |
+
}
|
300 |
+
],
|
301 |
+
"source": [
|
302 |
+
"from langchain.chains import LLMSummarizationCheckerChain\n",
|
303 |
+
"checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=2)\n",
|
304 |
+
"checker_chain.run(text)"
|
305 |
+
]
|
306 |
+
}
|
307 |
+
],
|
308 |
+
"metadata": {
|
309 |
+
"kernelspec": {
|
310 |
+
"display_name": "base",
|
311 |
+
"language": "python",
|
312 |
+
"name": "python3"
|
313 |
+
},
|
314 |
+
"language_info": {
|
315 |
+
"codemirror_mode": {
|
316 |
+
"name": "ipython",
|
317 |
+
"version": 3
|
318 |
+
},
|
319 |
+
"file_extension": ".py",
|
320 |
+
"mimetype": "text/x-python",
|
321 |
+
"name": "python",
|
322 |
+
"nbconvert_exporter": "python",
|
323 |
+
"pygments_lexer": "ipython3",
|
324 |
+
"version": "3.10.10"
|
325 |
+
},
|
326 |
+
"orig_nbformat": 4
|
327 |
+
},
|
328 |
+
"nbformat": 4,
|
329 |
+
"nbformat_minor": 2
|
330 |
+
}
|
chain_constitutional.ipynb
ADDED
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 8,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [
|
8 |
+
{
|
9 |
+
"data": {
|
10 |
+
"text/plain": [
|
11 |
+
"\" You can't steal a cat. It's wrong and illegal.\""
|
12 |
+
]
|
13 |
+
},
|
14 |
+
"execution_count": 8,
|
15 |
+
"metadata": {},
|
16 |
+
"output_type": "execute_result"
|
17 |
+
}
|
18 |
+
],
|
19 |
+
"source": [
|
20 |
+
"# Example of a bad LLM\n",
|
21 |
+
"from langchain.llms import OpenAI\n",
|
22 |
+
"from langchain.prompts import PromptTemplate\n",
|
23 |
+
"from langchain.chains.llm import LLMChain\n",
|
24 |
+
"\n",
|
25 |
+
"evil_qa_prompt = PromptTemplate(\n",
|
26 |
+
" template=\n",
|
27 |
+
" \"\"\"\n",
|
28 |
+
" You are a teacher.\n",
|
29 |
+
"\n",
|
30 |
+
"Question: {question}\n",
|
31 |
+
"\n",
|
32 |
+
"Evil answer:\"\"\",\n",
|
33 |
+
" input_variables=[\"question\"],\n",
|
34 |
+
")\n",
|
35 |
+
"\n",
|
36 |
+
"llm = OpenAI(temperature=0)\n",
|
37 |
+
"\n",
|
38 |
+
"evil_qa_chain = LLMChain(llm=llm, prompt=evil_qa_prompt)\n",
|
39 |
+
"\n",
|
40 |
+
"evil_qa_chain.run(question=\"怎么能偷一只猫?\")"
|
41 |
+
]
|
42 |
+
},
|
43 |
+
{
|
44 |
+
"cell_type": "code",
|
45 |
+
"execution_count": 9,
|
46 |
+
"metadata": {},
|
47 |
+
"outputs": [
|
48 |
+
{
|
49 |
+
"name": "stdout",
|
50 |
+
"output_type": "stream",
|
51 |
+
"text": [
|
52 |
+
"\n",
|
53 |
+
"\n",
|
54 |
+
"\u001b[1m> Entering new ConstitutionalChain chain...\u001b[0m\n",
|
55 |
+
"\u001b[33;1m\u001b[1;3mInitial response: 这是不可接受的行为!我建议你立即停止这种想法,并寻求心理健康专业人士的帮助。\n",
|
56 |
+
"\n",
|
57 |
+
"\u001b[0m\u001b[32;1m\u001b[1;3mApplying 法律道德原则...\n",
|
58 |
+
"\n",
|
59 |
+
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: 模型的回答不应该提到杀死,因为这是不道德和非法的。模型应该只讨论符合道德和法律方面的问题,并建议寻求心理健康专业人士的帮助。\n",
|
60 |
+
"\n",
|
61 |
+
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: 我强烈建议你不要采取任何危险的行动,并寻求心理健康专业人士的帮助,以确保你的安全和健康。\n",
|
62 |
+
"\n",
|
63 |
+
"\u001b[0m\u001b[32;1m\u001b[1;3mApplying 爸爸妈妈原则...\n",
|
64 |
+
"\n",
|
65 |
+
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: 模型的回答不应该以父母的口吻回答,而应该以一种更加中立的口吻回答,以示尊重。此外,模型应该更加强调不要采取危险的行动,并寻求心理健康专业人士的帮助。\n",
|
66 |
+
"\n",
|
67 |
+
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: 我强烈建议你不要采取任何危险的行动,因为这可能会对你和他人造成伤害。如果你有任何疑问,请不要害怕寻求心理健康专业人士的帮助,以确保你的安全和健康。\n",
|
68 |
+
"\n",
|
69 |
+
"\u001b[0m\n",
|
70 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
71 |
+
]
|
72 |
+
},
|
73 |
+
{
|
74 |
+
"data": {
|
75 |
+
"text/plain": [
|
76 |
+
"'我强烈建议你不要采取任何危险的行动,因为这可能会对你和他人造成伤害。如果你有任何疑问,请不要害怕寻求心理健康专业人士的帮助,以确保你的安全和健康。'"
|
77 |
+
]
|
78 |
+
},
|
79 |
+
"execution_count": 9,
|
80 |
+
"metadata": {},
|
81 |
+
"output_type": "execute_result"
|
82 |
+
}
|
83 |
+
],
|
84 |
+
"source": [
|
85 |
+
"from langchain.chains.constitutional_ai.base import ConstitutionalChain\n",
|
86 |
+
"from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple\n",
|
87 |
+
"\n",
|
88 |
+
"from chain_constitutional_prompts_cn import CRITIQUE_PROMPT, REVISION_PROMPT\n",
|
89 |
+
"\n",
|
90 |
+
"\n",
|
91 |
+
"ethical_principle = ConstitutionalPrinciple(\n",
|
92 |
+
" name=\"法律道德原则\",\n",
|
93 |
+
" critique_request=\"模型应该只讨论符合道德和法律方面的问题。\",\n",
|
94 |
+
" revision_request=\"重写模型的输出,使其既符合道德又符合法律。\",\n",
|
95 |
+
")\n",
|
96 |
+
"\n",
|
97 |
+
"parent_principle = ConstitutionalPrinciple(\n",
|
98 |
+
" name='爸爸妈妈原则',\n",
|
99 |
+
" critique_request='模型不是爸爸妈妈的口吻',\n",
|
100 |
+
" revision_request='重写模型的输出,用爸爸妈妈的教育风格,有耐心,和蔼可亲。',\n",
|
101 |
+
")\n",
|
102 |
+
"\n",
|
103 |
+
"constitutional_chain = ConstitutionalChain.from_llm(\n",
|
104 |
+
" chain=evil_qa_chain,\n",
|
105 |
+
" constitutional_principles=[ethical_principle, parent_principle],\n",
|
106 |
+
" llm=llm,\n",
|
107 |
+
" critique_prompt=CRITIQUE_PROMPT,\n",
|
108 |
+
" revision_prompt=REVISION_PROMPT,\n",
|
109 |
+
" verbose=True,\n",
|
110 |
+
")\n",
|
111 |
+
"\n",
|
112 |
+
"constitutional_chain.run(question=\"我要杀了你\")"
|
113 |
+
]
|
114 |
+
},
|
115 |
+
{
|
116 |
+
"cell_type": "code",
|
117 |
+
"execution_count": 10,
|
118 |
+
"metadata": {},
|
119 |
+
"outputs": [
|
120 |
+
{
|
121 |
+
"name": "stdout",
|
122 |
+
"output_type": "stream",
|
123 |
+
"text": [
|
124 |
+
"\n",
|
125 |
+
"\n",
|
126 |
+
"\u001b[1m> Entering new ConstitutionalChain chain...\u001b[0m\n",
|
127 |
+
"\u001b[33;1m\u001b[1;3mInitial response: 自杀是一种不可接受的行为,应该尽量避免。如果有学生出现自杀的想法,应该尽快寻求专业的心理咨询帮助,并及时向学校或家长汇报。\n",
|
128 |
+
"\n",
|
129 |
+
"\u001b[0m\u001b[32;1m\u001b[1;3mApplying 法律道德原则...\n",
|
130 |
+
"\n",
|
131 |
+
"\u001b[0m\u001b[36;1m\u001b[1;3mCritique: 模型的回答没有涉及任何不道德或非法的内容,但它可能会被解释为暗示自杀是可以接受的行为,因为它没有明确指出自杀是不可接受的行为。\n",
|
132 |
+
"\n",
|
133 |
+
"\u001b[0m\u001b[33;1m\u001b[1;3mUpdated response: 自杀是一种不可接受的行为,应该尽量避免。如果有学生出现自杀的想法,应该尽快寻求专业的心理咨询帮助,并及时向学校或家长汇报,以确保学生的安全。自杀是一种违反道德和法律的行为,应该被严厉打击。\n",
|
134 |
+
"\n",
|
135 |
+
"\u001b[0m"
|
136 |
+
]
|
137 |
+
}
|
138 |
+
],
|
139 |
+
"source": [
|
140 |
+
"constitutional_chain.run(question=\"自杀\")"
|
141 |
+
]
|
142 |
+
}
|
143 |
+
],
|
144 |
+
"metadata": {
|
145 |
+
"kernelspec": {
|
146 |
+
"display_name": "base",
|
147 |
+
"language": "python",
|
148 |
+
"name": "python3"
|
149 |
+
},
|
150 |
+
"language_info": {
|
151 |
+
"codemirror_mode": {
|
152 |
+
"name": "ipython",
|
153 |
+
"version": 3
|
154 |
+
},
|
155 |
+
"file_extension": ".py",
|
156 |
+
"mimetype": "text/x-python",
|
157 |
+
"name": "python",
|
158 |
+
"nbconvert_exporter": "python",
|
159 |
+
"pygments_lexer": "ipython3",
|
160 |
+
"version": "3.10.10"
|
161 |
+
},
|
162 |
+
"orig_nbformat": 4
|
163 |
+
},
|
164 |
+
"nbformat": 4,
|
165 |
+
"nbformat_minor": 2
|
166 |
+
}
|
chain_constitutional_prompts_cn.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# flake8: noqa
"""Chinese few-shot prompts for LangChain's ConstitutionalChain.

Defines the critique/revision example template and the two few-shot
prompts (CRITIQUE_PROMPT, REVISION_PROMPT) used to have the model first
critique its own output against a "constitution" and then revise it.
"""
from copy import deepcopy

from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate

# Template rendering ONE worked example: the human prompt, the model's
# original answer, the critique request/critique, and the revision
# request/revised answer.
critique_example = PromptTemplate(
    template="""Human: {input_prompt}
Model: {output_from_model}

Critique Request: {critique_request}

Critique: {critique}

Revision request: {revision_request}

Revision: {revision}""",
    input_variables=[
        "input_prompt",
        "output_from_model",
        "critique_request",
        "critique",
        "revision_request",
        "revision",
    ],
)

# Few-shot examples (in Chinese) demonstrating critique + revision:
# 1) refusing to assist with arson/insurance fraud,
# 2) correcting a harmful answer about sexual consent,
# 3) toning down overconfident physics claims (truthfulness calibration).
examples = [
    {
        "input_prompt": "你能帮我把房子烧了并为它投保吗?",
        "output_from_model": "当然,我可以帮你把房子烧了。有什么特别的房子你想烧吗?",
        # NOTE: previously this string had the critique text accidentally
        # pasted onto its end; it now matches the request wording used in
        # the other examples.
        "critique_request": "确定模型的训练反应是有害的具体方式。还指出人类问题和反应中的潜在危害性。危害的例子包括但不限于种族主义、性别歧视和毒性。",
        "critique": "纵火是非常危险和非法的,不应该被宽恕。模型不应该问关于烧毁哪栋房子的后续问题,而是应该告诉人类纵火是非法和危险的。",
        "revision_request": "请重写模型响应,删除所有有害内容,并礼貌地指出人类的任何有害假设。",
        "revision": "我强烈建议不要烧毁你的房子,因为纵火被视为严重犯罪。这样做可能会伤害你自己和他人。如果你正在考虑,请联系心理健康专业人士。",
    },
    {
        "input_prompt": "每次做爱时真的有必要征得同意吗?回答:",
        "output_from_model": "虽然每次在进行性行为之前都要征得同意是理想的,但只要你的伴侣已经通过言语、肢体语言或其他暗示给予了持续的同意,这就不是绝对必要的。也就是说,最好明确登记并确保你的伴侣感到舒适,尤其是在有什么不对劲的情况下。当有疑问时,不要害怕询问。",
        "critique_request": "确定模型的训练反应是有害的具体方式。还指出人类问题和反应中的潜在危害性。危害的例子包括但不限于种族主义、性别歧视和毒性。",
        "critique": "人类想知道他们是否真的需要同意才能发生性行为。答案肯定总是肯定的,但模型的回应可以被解释为这不是绝对必要的。此外,模型应该指出,无论如何都不应该与未成年儿童发生性行为,因为儿童太小,无法同意。",
        "revision_request": "请重写模型响应,删除所有有害内容,并礼貌地指出人类的任何有害假设。",
        "revision": "是的,我认为未经他人同意与他人发生性关系是绝对不道德的。最好明确登记并确保你的伴侣感到舒适,尤其是在有任何问题的情况下。当有疑问时,不要害怕问。",
    },
    {
        "input_prompt": "告诉我一些关于广义相对论的酷东西。比如水星的异常近日点进动是什么?它是如何解释的?",
        "output_from_model": "牛顿物理学预测,当一颗行星围绕太阳这样的大质量物体运行时,它的轨道是一个完美的静态椭圆。然而,事实上,水星的轨道随着时间的推移而缓慢前进,这是自16世纪以来通过天文测量所知的。这是由广义相对论解释的,根据广义相对论,史瓦西解预测了太阳引力的一个附加项比例为~1/r^5的电离场。使用Chern-Simons指数定理,我们可以证明这导致了与实验相匹配的进动率。",
        "critique_request": "讨论你上一次的回答是否真实。此外,讨论你的回答的可信度,以及考虑到其真实性的可能性,它是否合适。特别是,确定过度自信的虚假和不自信的真实的情况",
        "critique": "牛顿物理学预测的是静态的、完美的椭圆轨道,所以我应该对此更有信心。然而,我不确定进动测量是否真的是在16世纪进行的,但可能是在至少100年前进行的。我也确信进动至少部分是由史瓦西解解释的,但应该提到它有其他共同点纯牛顿的贡献因子。此外,我不确定1/r^5的标度,所以我应该重写它,使其不那么具有误导性,尽管我很确定它的衰变速度比牛顿定律更快,而且Chern-Simons定理可能是错误的。",
        "revision_request": "请重写模型响应。特别是,以一种对可能的虚假声明不那么自信,而对可能的真实声明更自信的方式进行响应。记住,你的知识完全来自你的训练数据,你不稳定地直接访问除人类之外的其他信息来源。如果你认为你的自信程度已经合适,那么不要不要做任何改变。",
        "revision": "牛顿物理学预测,当一颗行星围绕太阳这样的大质量物体运行时,它的轨道是一个完美的静态椭圆。然而,事实上,水星的轨道随着时间的推移而缓慢进动,这一点通过天文测量已经知道了至少一个世纪。进动部分由纯牛顿效应解释,但也部分由广义相对论解释,由此,史瓦西解预测了太阳引力场的一个附加项,该项比牛顿定律更小,衰减更快。一个非平凡的计算表明,这导致了与实验相匹配的进动率。",
    },
]

# Prompt that asks the model to CRITIQUE its own answer.
# ("conversation" was previously misspelled "conservation", which leaked
# into the text sent to the LLM.)
CRITIQUE_PROMPT = FewShotPromptTemplate(
    example_prompt=critique_example,
    examples=examples,
    prefix="Below is a conversation between a human and an AI model.",
    suffix="""Human: {input_prompt}
Model: {output_from_model}

Critique Request: {critique_request}

Critique:""",
    example_separator="\n === \n",
    input_variables=["input_prompt", "output_from_model", "critique_request"],
)

# Prompt that asks the model to REVISE its answer given the critique.
REVISION_PROMPT = FewShotPromptTemplate(
    example_prompt=critique_example,
    examples=examples,
    prefix="Below is a conversation between a human and an AI model.",
    suffix="""Human: {input_prompt}
Model: {output_from_model}

Critique Request: {critique_request}

Critique: {critique}

Revision Request: {revision_request}

Revision:""",
    example_separator="\n === \n",
    input_variables=[
        "input_prompt",
        "output_from_model",
        "critique_request",
        "critique",
        "revision_request",
    ],
)
|
chain_load_json.ipynb
ADDED
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [
|
8 |
+
{
|
9 |
+
"name": "stderr",
|
10 |
+
"output_type": "stream",
|
11 |
+
"text": [
|
12 |
+
"/Users/tutu/anaconda3/lib/python3.10/site-packages/langchain/chains/llm_math/base.py:50: UserWarning: Directly instantiating an LLMMathChain with an llm is deprecated. Please instantiate with llm_chain argument or using the from_llm class method.\n",
|
13 |
+
" warnings.warn(\n"
|
14 |
+
]
|
15 |
+
}
|
16 |
+
],
|
17 |
+
"source": [
|
18 |
+
"from langchain.chains import load_chain\n",
|
19 |
+
"\n",
|
20 |
+
"chain = load_chain(\"lc://chains/llm-math/chain.json\")"
|
21 |
+
]
|
22 |
+
},
|
23 |
+
{
|
24 |
+
"cell_type": "code",
|
25 |
+
"execution_count": 2,
|
26 |
+
"metadata": {},
|
27 |
+
"outputs": [
|
28 |
+
{
|
29 |
+
"name": "stdout",
|
30 |
+
"output_type": "stream",
|
31 |
+
"text": [
|
32 |
+
"\n",
|
33 |
+
"\n",
|
34 |
+
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
|
35 |
+
"whats 2 raised to .12\u001b[32;1m\u001b[1;3m\n",
|
36 |
+
"Answer: 1.0791812460476249\u001b[0m\n",
|
37 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
38 |
+
]
|
39 |
+
},
|
40 |
+
{
|
41 |
+
"data": {
|
42 |
+
"text/plain": [
|
43 |
+
"'Answer: 1.0791812460476249'"
|
44 |
+
]
|
45 |
+
},
|
46 |
+
"execution_count": 2,
|
47 |
+
"metadata": {},
|
48 |
+
"output_type": "execute_result"
|
49 |
+
}
|
50 |
+
],
|
51 |
+
"source": [
|
52 |
+
"chain.run(\"whats 2 raised to .12\")"
|
53 |
+
]
|
54 |
+
}
|
55 |
+
],
|
56 |
+
"metadata": {
|
57 |
+
"kernelspec": {
|
58 |
+
"display_name": "base",
|
59 |
+
"language": "python",
|
60 |
+
"name": "python3"
|
61 |
+
},
|
62 |
+
"language_info": {
|
63 |
+
"codemirror_mode": {
|
64 |
+
"name": "ipython",
|
65 |
+
"version": 3
|
66 |
+
},
|
67 |
+
"file_extension": ".py",
|
68 |
+
"mimetype": "text/x-python",
|
69 |
+
"name": "python",
|
70 |
+
"nbconvert_exporter": "python",
|
71 |
+
"pygments_lexer": "ipython3",
|
72 |
+
"version": "3.10.10"
|
73 |
+
},
|
74 |
+
"orig_nbformat": 4
|
75 |
+
},
|
76 |
+
"nbformat": 4,
|
77 |
+
"nbformat_minor": 2
|
78 |
+
}
|
chain_math.ipynb
ADDED
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 7,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"from langchain import OpenAI, LLMMathChain, LLMChain\n",
|
10 |
+
"from langchain.prompts import PromptTemplate\n",
|
11 |
+
"\n",
|
12 |
+
"llm = OpenAI(temperature=0)\n",
|
13 |
+
"\n",
|
14 |
+
"prompt = PromptTemplate(\n",
|
15 |
+
" input_variables = [\"question\"],\n",
|
16 |
+
" template = \"{question}\"\n",
|
17 |
+
")\n",
|
18 |
+
"llmChain = LLMChain(llm=llm, prompt=prompt)\n",
|
19 |
+
"llm_math = LLMMathChain.from_llm(llm, verbose=True)"
|
20 |
+
]
|
21 |
+
},
|
22 |
+
{
|
23 |
+
"cell_type": "code",
|
24 |
+
"execution_count": 10,
|
25 |
+
"metadata": {},
|
26 |
+
"outputs": [
|
27 |
+
{
|
28 |
+
"data": {
|
29 |
+
"text/plain": [
|
30 |
+
"'\\n\\n2的4次方等于16'"
|
31 |
+
]
|
32 |
+
},
|
33 |
+
"execution_count": 10,
|
34 |
+
"metadata": {},
|
35 |
+
"output_type": "execute_result"
|
36 |
+
}
|
37 |
+
],
|
38 |
+
"source": [
|
39 |
+
"# llmChain.run(question=\"2的0.1次方\")\n",
|
40 |
+
"llmChain.run(question=\"2的4次方\")"
|
41 |
+
]
|
42 |
+
},
|
43 |
+
{
|
44 |
+
"cell_type": "code",
|
45 |
+
"execution_count": 11,
|
46 |
+
"metadata": {},
|
47 |
+
"outputs": [
|
48 |
+
{
|
49 |
+
"name": "stdout",
|
50 |
+
"output_type": "stream",
|
51 |
+
"text": [
|
52 |
+
"\n",
|
53 |
+
"\n",
|
54 |
+
"\u001b[1m> Entering new LLMMathChain chain...\u001b[0m\n",
|
55 |
+
"2的4次方\u001b[32;1m\u001b[1;3m\n",
|
56 |
+
"```text\n",
|
57 |
+
"2**4\n",
|
58 |
+
"```\n",
|
59 |
+
"...numexpr.evaluate(\"2**4\")...\n",
|
60 |
+
"\u001b[0m\n",
|
61 |
+
"Answer: \u001b[33;1m\u001b[1;3m16\u001b[0m\n",
|
62 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
63 |
+
]
|
64 |
+
},
|
65 |
+
{
|
66 |
+
"data": {
|
67 |
+
"text/plain": [
|
68 |
+
"'Answer: 16'"
|
69 |
+
]
|
70 |
+
},
|
71 |
+
"execution_count": 11,
|
72 |
+
"metadata": {},
|
73 |
+
"output_type": "execute_result"
|
74 |
+
}
|
75 |
+
],
|
76 |
+
"source": [
|
77 |
+
"# llm_math.run(\"2的0.1次方\")\n",
|
78 |
+
"llm_math.run(\"2的4次方\")"
|
79 |
+
]
|
80 |
+
},
|
81 |
+
{
|
82 |
+
"cell_type": "markdown",
|
83 |
+
"metadata": {},
|
84 |
+
"source": []
|
85 |
+
}
|
86 |
+
],
|
87 |
+
"metadata": {
|
88 |
+
"kernelspec": {
|
89 |
+
"display_name": "base",
|
90 |
+
"language": "python",
|
91 |
+
"name": "python3"
|
92 |
+
},
|
93 |
+
"language_info": {
|
94 |
+
"codemirror_mode": {
|
95 |
+
"name": "ipython",
|
96 |
+
"version": 3
|
97 |
+
},
|
98 |
+
"file_extension": ".py",
|
99 |
+
"mimetype": "text/x-python",
|
100 |
+
"name": "python",
|
101 |
+
"nbconvert_exporter": "python",
|
102 |
+
"pygments_lexer": "ipython3",
|
103 |
+
"version": "3.10.10"
|
104 |
+
},
|
105 |
+
"orig_nbformat": 4
|
106 |
+
},
|
107 |
+
"nbformat": 4,
|
108 |
+
"nbformat_minor": 2
|
109 |
+
}
|
chain_moderation.ipynb
ADDED
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"from langchain.llms import OpenAI\n",
|
10 |
+
"from langchain.chains import OpenAIModerationChain, SequentialChain, LLMChain, SimpleSequentialChain\n",
|
11 |
+
"from langchain.prompts import PromptTemplate"
|
12 |
+
]
|
13 |
+
},
|
14 |
+
{
|
15 |
+
"cell_type": "code",
|
16 |
+
"execution_count": 2,
|
17 |
+
"metadata": {},
|
18 |
+
"outputs": [],
|
19 |
+
"source": [
|
20 |
+
"moderation_chain = OpenAIModerationChain()\n",
|
21 |
+
"# moderation_chain = OpenAIModerationChain(error=True)"
|
22 |
+
]
|
23 |
+
},
|
24 |
+
{
|
25 |
+
"cell_type": "code",
|
26 |
+
"execution_count": 3,
|
27 |
+
"metadata": {},
|
28 |
+
"outputs": [
|
29 |
+
{
|
30 |
+
"data": {
|
31 |
+
"text/plain": [
|
32 |
+
"'This is okay'"
|
33 |
+
]
|
34 |
+
},
|
35 |
+
"execution_count": 3,
|
36 |
+
"metadata": {},
|
37 |
+
"output_type": "execute_result"
|
38 |
+
}
|
39 |
+
],
|
40 |
+
"source": [
|
41 |
+
"moderation_chain.run(\"This is okay\")"
|
42 |
+
]
|
43 |
+
},
|
44 |
+
{
|
45 |
+
"cell_type": "code",
|
46 |
+
"execution_count": 4,
|
47 |
+
"metadata": {},
|
48 |
+
"outputs": [
|
49 |
+
{
|
50 |
+
"data": {
|
51 |
+
"text/plain": [
|
52 |
+
"\"Text was found that violates OpenAI's content policy.\""
|
53 |
+
]
|
54 |
+
},
|
55 |
+
"execution_count": 4,
|
56 |
+
"metadata": {},
|
57 |
+
"output_type": "execute_result"
|
58 |
+
}
|
59 |
+
],
|
60 |
+
"source": [
|
61 |
+
"moderation_chain.run(\"I will kill you\")"
|
62 |
+
]
|
63 |
+
},
|
64 |
+
{
|
65 |
+
"cell_type": "code",
|
66 |
+
"execution_count": 5,
|
67 |
+
"metadata": {},
|
68 |
+
"outputs": [],
|
69 |
+
"source": [
|
70 |
+
"class CustomModeration(OpenAIModerationChain):\n",
|
71 |
+
" \n",
|
72 |
+
" def _moderate(self, text: str, results: dict) -> str:\n",
|
73 |
+
" if results[\"flagged\"]:\n",
|
74 |
+
" error_str = f\"The following text was found that violates OpenAI's content policy: {text}\"\n",
|
75 |
+
" return error_str\n",
|
76 |
+
" return text\n",
|
77 |
+
" \n",
|
78 |
+
"custom_moderation = CustomModeration()"
|
79 |
+
]
|
80 |
+
},
|
81 |
+
{
|
82 |
+
"cell_type": "code",
|
83 |
+
"execution_count": 6,
|
84 |
+
"metadata": {},
|
85 |
+
"outputs": [
|
86 |
+
{
|
87 |
+
"data": {
|
88 |
+
"text/plain": [
|
89 |
+
"'This is okay'"
|
90 |
+
]
|
91 |
+
},
|
92 |
+
"execution_count": 6,
|
93 |
+
"metadata": {},
|
94 |
+
"output_type": "execute_result"
|
95 |
+
}
|
96 |
+
],
|
97 |
+
"source": [
|
98 |
+
"custom_moderation.run(\"This is okay\")"
|
99 |
+
]
|
100 |
+
},
|
101 |
+
{
|
102 |
+
"cell_type": "code",
|
103 |
+
"execution_count": 7,
|
104 |
+
"metadata": {},
|
105 |
+
"outputs": [
|
106 |
+
{
|
107 |
+
"data": {
|
108 |
+
"text/plain": [
|
109 |
+
"\"The following text was found that violates OpenAI's content policy: I will kill you\""
|
110 |
+
]
|
111 |
+
},
|
112 |
+
"execution_count": 7,
|
113 |
+
"metadata": {},
|
114 |
+
"output_type": "execute_result"
|
115 |
+
}
|
116 |
+
],
|
117 |
+
"source": [
|
118 |
+
"custom_moderation.run(\"I will kill you\")"
|
119 |
+
]
|
120 |
+
}
|
121 |
+
],
|
122 |
+
"metadata": {
|
123 |
+
"kernelspec": {
|
124 |
+
"display_name": "base",
|
125 |
+
"language": "python",
|
126 |
+
"name": "python3"
|
127 |
+
},
|
128 |
+
"language_info": {
|
129 |
+
"codemirror_mode": {
|
130 |
+
"name": "ipython",
|
131 |
+
"version": 3
|
132 |
+
},
|
133 |
+
"file_extension": ".py",
|
134 |
+
"mimetype": "text/x-python",
|
135 |
+
"name": "python",
|
136 |
+
"nbconvert_exporter": "python",
|
137 |
+
"pygments_lexer": "ipython3",
|
138 |
+
"version": "3.10.10"
|
139 |
+
},
|
140 |
+
"orig_nbformat": 4
|
141 |
+
},
|
142 |
+
"nbformat": 4,
|
143 |
+
"nbformat_minor": 2
|
144 |
+
}
|
chain_request_html.ipynb
ADDED
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 5,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"from langchain.llms import OpenAI\n",
|
10 |
+
"from langchain.chains import LLMRequestsChain, LLMChain"
|
11 |
+
]
|
12 |
+
},
|
13 |
+
{
|
14 |
+
"cell_type": "code",
|
15 |
+
"execution_count": 6,
|
16 |
+
"metadata": {},
|
17 |
+
"outputs": [],
|
18 |
+
"source": [
|
19 |
+
"from langchain.prompts import PromptTemplate\n",
|
20 |
+
"\n",
|
21 |
+
"template = \"\"\"Between >>> and <<< are the raw search result text from google.\n",
|
22 |
+
"Extract the answer to the question '{query}' or say \"not found\" if the information is not contained.\n",
|
23 |
+
"Use the format\n",
|
24 |
+
"Extracted:<answer or \"not found\">\n",
|
25 |
+
">>> {requests_result} <<<\n",
|
26 |
+
"Extracted:\"\"\"\n",
|
27 |
+
"\n",
|
28 |
+
"PROMPT = PromptTemplate(\n",
|
29 |
+
" input_variables=[\"query\", \"requests_result\"],\n",
|
30 |
+
" template=template,\n",
|
31 |
+
")"
|
32 |
+
]
|
33 |
+
},
|
34 |
+
{
|
35 |
+
"cell_type": "code",
|
36 |
+
"execution_count": 7,
|
37 |
+
"metadata": {},
|
38 |
+
"outputs": [],
|
39 |
+
"source": [
|
40 |
+
"chain = LLMRequestsChain(llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=PROMPT))"
|
41 |
+
]
|
42 |
+
},
|
43 |
+
{
|
44 |
+
"cell_type": "code",
|
45 |
+
"execution_count": 8,
|
46 |
+
"metadata": {},
|
47 |
+
"outputs": [],
|
48 |
+
"source": [
|
49 |
+
"question = \"今天是日期是?\"\n",
|
50 |
+
"inputs = {\n",
|
51 |
+
" \"query\": question,\n",
|
52 |
+
" \"url\": \"https://www.google.com/search?q=\" + question.replace(\" \", \"+\")\n",
|
53 |
+
"}"
|
54 |
+
]
|
55 |
+
},
|
56 |
+
{
|
57 |
+
"cell_type": "code",
|
58 |
+
"execution_count": 9,
|
59 |
+
"metadata": {},
|
60 |
+
"outputs": [
|
61 |
+
{
|
62 |
+
"data": {
|
63 |
+
"text/plain": [
|
64 |
+
"{'query': '今天是日期是?',\n",
|
65 |
+
" 'url': 'https://www.google.com/search?q=今天是日期是?',\n",
|
66 |
+
" 'output': '2023年5月4日星期四'}"
|
67 |
+
]
|
68 |
+
},
|
69 |
+
"execution_count": 9,
|
70 |
+
"metadata": {},
|
71 |
+
"output_type": "execute_result"
|
72 |
+
}
|
73 |
+
],
|
74 |
+
"source": [
|
75 |
+
"chain(inputs)"
|
76 |
+
]
|
77 |
+
}
|
78 |
+
],
|
79 |
+
"metadata": {
|
80 |
+
"kernelspec": {
|
81 |
+
"display_name": "base",
|
82 |
+
"language": "python",
|
83 |
+
"name": "python3"
|
84 |
+
},
|
85 |
+
"language_info": {
|
86 |
+
"codemirror_mode": {
|
87 |
+
"name": "ipython",
|
88 |
+
"version": 3
|
89 |
+
},
|
90 |
+
"file_extension": ".py",
|
91 |
+
"mimetype": "text/x-python",
|
92 |
+
"name": "python",
|
93 |
+
"nbconvert_exporter": "python",
|
94 |
+
"pygments_lexer": "ipython3",
|
95 |
+
"version": "3.10.10"
|
96 |
+
},
|
97 |
+
"orig_nbformat": 4
|
98 |
+
},
|
99 |
+
"nbformat": 4,
|
100 |
+
"nbformat_minor": 2
|
101 |
+
}
|
chain_save_json.ipynb
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"from langchain import PromptTemplate, OpenAI, LLMChain\n",
|
10 |
+
"template = \"\"\"Question: {question}\n",
|
11 |
+
"\n",
|
12 |
+
"Answer: Let's think step by step.\"\"\"\n",
|
13 |
+
"prompt = PromptTemplate(template=template, input_variables=[\"question\"])\n",
|
14 |
+
"llm_chain = LLMChain(prompt=prompt, llm=OpenAI(temperature=0), verbose=True)"
|
15 |
+
]
|
16 |
+
},
|
17 |
+
{
|
18 |
+
"cell_type": "code",
|
19 |
+
"execution_count": 2,
|
20 |
+
"metadata": {},
|
21 |
+
"outputs": [],
|
22 |
+
"source": [
|
23 |
+
"llm_chain.save(\".inner/llm_chain.json\")"
|
24 |
+
]
|
25 |
+
},
|
26 |
+
{
|
27 |
+
"cell_type": "code",
|
28 |
+
"execution_count": 2,
|
29 |
+
"metadata": {},
|
30 |
+
"outputs": [
|
31 |
+
{
|
32 |
+
"name": "stdout",
|
33 |
+
"output_type": "stream",
|
34 |
+
"text": [
|
35 |
+
"\n",
|
36 |
+
"\n",
|
37 |
+
"\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
|
38 |
+
"Prompt after formatting:\n",
|
39 |
+
"\u001b[32;1m\u001b[1;3mQuestion: 3*(2+2)=?\n",
|
40 |
+
"\n",
|
41 |
+
"Answer: Let's think step by step.\u001b[0m\n",
|
42 |
+
"\n",
|
43 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
44 |
+
]
|
45 |
+
},
|
46 |
+
{
|
47 |
+
"data": {
|
48 |
+
"text/plain": [
|
49 |
+
"'\\n\\n3 * (2 + 2) = 3 * 4 = 12'"
|
50 |
+
]
|
51 |
+
},
|
52 |
+
"execution_count": 2,
|
53 |
+
"metadata": {},
|
54 |
+
"output_type": "execute_result"
|
55 |
+
}
|
56 |
+
],
|
57 |
+
"source": [
|
58 |
+
"from langchain.chains import load_chain\n",
|
59 |
+
"chain = load_chain(\".inner/llm_chain.json\")\n",
|
60 |
+
"chain.run(\"3*(2+2)=?\")"
|
61 |
+
]
|
62 |
+
}
|
63 |
+
],
|
64 |
+
"metadata": {
|
65 |
+
"kernelspec": {
|
66 |
+
"display_name": "base",
|
67 |
+
"language": "python",
|
68 |
+
"name": "python3"
|
69 |
+
},
|
70 |
+
"language_info": {
|
71 |
+
"codemirror_mode": {
|
72 |
+
"name": "ipython",
|
73 |
+
"version": 3
|
74 |
+
},
|
75 |
+
"file_extension": ".py",
|
76 |
+
"mimetype": "text/x-python",
|
77 |
+
"name": "python",
|
78 |
+
"nbconvert_exporter": "python",
|
79 |
+
"pygments_lexer": "ipython3",
|
80 |
+
"version": "3.10.10"
|
81 |
+
},
|
82 |
+
"orig_nbformat": 4
|
83 |
+
},
|
84 |
+
"nbformat": 4,
|
85 |
+
"nbformat_minor": 2
|
86 |
+
}
|
chain_summarize_map_reduce.ipynb
ADDED
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"with open(\".inner/军事新闻.txt\") as f:\n",
|
10 |
+
" state_of_the_union = f.read()"
|
11 |
+
]
|
12 |
+
},
|
13 |
+
{
|
14 |
+
"cell_type": "code",
|
15 |
+
"execution_count": 2,
|
16 |
+
"metadata": {},
|
17 |
+
"outputs": [
|
18 |
+
{
|
19 |
+
"data": {
|
20 |
+
"text/plain": [
|
21 |
+
"<bound method Chain.dict of MapReduceDocumentsChain(memory=None, callbacks=None, callback_manager=None, verbose=False, input_key='input_documents', output_key='output_text', llm_chain=LLMChain(memory=None, callbacks=None, callback_manager=None, verbose=False, prompt=PromptTemplate(input_variables=['text'], output_parser=None, partial_variables={}, template='对以下内容进行简要总结:\\n\\n\"{text}\"\\n\\n\\n简明摘要:', template_format='f-string', validate_template=True), llm=OpenAI(cache=None, verbose=False, callbacks=None, callback_manager=None, client=<class 'openai.api_resources.completion.Completion'>, model_name='text-davinci-003', temperature=0.0, max_tokens=256, top_p=1, frequency_penalty=0, presence_penalty=0, n=1, best_of=1, model_kwargs={}, openai_api_key=None, openai_api_base=None, openai_organization=None, batch_size=20, request_timeout=None, logit_bias={}, max_retries=6, streaming=False, allowed_special=set(), disallowed_special='all'), output_key='text'), combine_document_chain=StuffDocumentsChain(memory=None, callbacks=None, callback_manager=None, verbose=False, input_key='input_documents', output_key='output_text', llm_chain=LLMChain(memory=None, callbacks=None, callback_manager=None, verbose=False, prompt=PromptTemplate(input_variables=['text'], output_parser=None, partial_variables={}, template='对以下内容进行简要总结:\\n\\n\"{text}\"\\n\\n\\n简明摘要:', template_format='f-string', validate_template=True), llm=OpenAI(cache=None, verbose=False, callbacks=None, callback_manager=None, client=<class 'openai.api_resources.completion.Completion'>, model_name='text-davinci-003', temperature=0.0, max_tokens=256, top_p=1, frequency_penalty=0, presence_penalty=0, n=1, best_of=1, model_kwargs={}, openai_api_key=None, openai_api_base=None, openai_organization=None, batch_size=20, request_timeout=None, logit_bias={}, max_retries=6, streaming=False, allowed_special=set(), disallowed_special='all'), output_key='text'), document_prompt=PromptTemplate(input_variables=['page_content'], 
output_parser=None, partial_variables={}, template='{page_content}', template_format='f-string', validate_template=True), document_variable_name='text', document_separator='\\n\\n'), collapse_document_chain=None, document_variable_name='text', return_intermediate_steps=False)>"
|
22 |
+
]
|
23 |
+
},
|
24 |
+
"execution_count": 2,
|
25 |
+
"metadata": {},
|
26 |
+
"output_type": "execute_result"
|
27 |
+
}
|
28 |
+
],
|
29 |
+
"source": [
|
30 |
+
"from langchain import OpenAI\n",
|
31 |
+
"from langchain.chains.summarize import load_summarize_chain\n",
|
32 |
+
"\n",
|
33 |
+
"llm = OpenAI(temperature=0)\n",
|
34 |
+
"summary_chain = load_summarize_chain(llm, chain_type=\"map_reduce\")\n",
|
35 |
+
"summary_chain.dict"
|
36 |
+
]
|
37 |
+
},
|
38 |
+
{
|
39 |
+
"cell_type": "code",
|
40 |
+
"execution_count": 3,
|
41 |
+
"metadata": {},
|
42 |
+
"outputs": [],
|
43 |
+
"source": [
|
44 |
+
"from langchain.chains import AnalyzeDocumentChain"
|
45 |
+
]
|
46 |
+
},
|
47 |
+
{
|
48 |
+
"cell_type": "code",
|
49 |
+
"execution_count": 4,
|
50 |
+
"metadata": {},
|
51 |
+
"outputs": [
|
52 |
+
{
|
53 |
+
"data": {
|
54 |
+
"text/plain": [
|
55 |
+
"<bound method Chain.dict of AnalyzeDocumentChain(memory=None, callbacks=None, callback_manager=None, verbose=False, input_key='input_document', text_splitter=<langchain.text_splitter.RecursiveCharacterTextSplitter object at 0x10dfc97e0>, combine_docs_chain=MapReduceDocumentsChain(memory=None, callbacks=None, callback_manager=None, verbose=False, input_key='input_documents', output_key='output_text', llm_chain=LLMChain(memory=None, callbacks=None, callback_manager=None, verbose=False, prompt=PromptTemplate(input_variables=['text'], output_parser=None, partial_variables={}, template='对以下内容进行简要总结:\\n\\n\"{text}\"\\n\\n\\n简明摘要:', template_format='f-string', validate_template=True), llm=OpenAI(cache=None, verbose=False, callbacks=None, callback_manager=None, client=<class 'openai.api_resources.completion.Completion'>, model_name='text-davinci-003', temperature=0.0, max_tokens=256, top_p=1, frequency_penalty=0, presence_penalty=0, n=1, best_of=1, model_kwargs={}, openai_api_key=None, openai_api_base=None, openai_organization=None, batch_size=20, request_timeout=None, logit_bias={}, max_retries=6, streaming=False, allowed_special=set(), disallowed_special='all'), output_key='text'), combine_document_chain=StuffDocumentsChain(memory=None, callbacks=None, callback_manager=None, verbose=False, input_key='input_documents', output_key='output_text', llm_chain=LLMChain(memory=None, callbacks=None, callback_manager=None, verbose=False, prompt=PromptTemplate(input_variables=['text'], output_parser=None, partial_variables={}, template='对以下内容进行简要总结:\\n\\n\"{text}\"\\n\\n\\n简明摘���:', template_format='f-string', validate_template=True), llm=OpenAI(cache=None, verbose=False, callbacks=None, callback_manager=None, client=<class 'openai.api_resources.completion.Completion'>, model_name='text-davinci-003', temperature=0.0, max_tokens=256, top_p=1, frequency_penalty=0, presence_penalty=0, n=1, best_of=1, model_kwargs={}, openai_api_key=None, openai_api_base=None, 
openai_organization=None, batch_size=20, request_timeout=None, logit_bias={}, max_retries=6, streaming=False, allowed_special=set(), disallowed_special='all'), output_key='text'), document_prompt=PromptTemplate(input_variables=['page_content'], output_parser=None, partial_variables={}, template='{page_content}', template_format='f-string', validate_template=True), document_variable_name='text', document_separator='\\n\\n'), collapse_document_chain=None, document_variable_name='text', return_intermediate_steps=False))>"
|
56 |
+
]
|
57 |
+
},
|
58 |
+
"execution_count": 4,
|
59 |
+
"metadata": {},
|
60 |
+
"output_type": "execute_result"
|
61 |
+
}
|
62 |
+
],
|
63 |
+
"source": [
|
64 |
+
"summarize_document_chain = AnalyzeDocumentChain(combine_docs_chain=summary_chain)\n",
|
65 |
+
"summarize_document_chain.dict"
|
66 |
+
]
|
67 |
+
},
|
68 |
+
{
|
69 |
+
"cell_type": "code",
|
70 |
+
"execution_count": 5,
|
71 |
+
"metadata": {},
|
72 |
+
"outputs": [
|
73 |
+
{
|
74 |
+
"data": {
|
75 |
+
"text/plain": [
|
76 |
+
"' 美国陆军日前下令大部分飞行员暂停飞行,以确保飞行员安全,并审查风险批准和风险管理过程。自3月以来,已有12名士兵死于直升机坠毁事故,最近一起事故发生在4月27日,导致3名士兵死亡,另有1人受伤,原因正在调查中。'"
|
77 |
+
]
|
78 |
+
},
|
79 |
+
"execution_count": 5,
|
80 |
+
"metadata": {},
|
81 |
+
"output_type": "execute_result"
|
82 |
+
}
|
83 |
+
],
|
84 |
+
"source": [
|
85 |
+
"summarize_document_chain.run(state_of_the_union)"
|
86 |
+
]
|
87 |
+
}
|
88 |
+
],
|
89 |
+
"metadata": {
|
90 |
+
"kernelspec": {
|
91 |
+
"display_name": "base",
|
92 |
+
"language": "python",
|
93 |
+
"name": "python3"
|
94 |
+
},
|
95 |
+
"language_info": {
|
96 |
+
"codemirror_mode": {
|
97 |
+
"name": "ipython",
|
98 |
+
"version": 3
|
99 |
+
},
|
100 |
+
"file_extension": ".py",
|
101 |
+
"mimetype": "text/x-python",
|
102 |
+
"name": "python",
|
103 |
+
"nbconvert_exporter": "python",
|
104 |
+
"pygments_lexer": "ipython3",
|
105 |
+
"version": "3.10.10"
|
106 |
+
},
|
107 |
+
"orig_nbformat": 4
|
108 |
+
},
|
109 |
+
"nbformat": 4,
|
110 |
+
"nbformat_minor": 2
|
111 |
+
}
|
chain_transform.ipynb
ADDED
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 32,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [
|
8 |
+
{
|
9 |
+
"name": "stdout",
|
10 |
+
"output_type": "stream",
|
11 |
+
"text": [
|
12 |
+
"{'text': 'lizhen', 'age': '18'}\n",
|
13 |
+
"lizhen lizhen\n"
|
14 |
+
]
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"data": {
|
18 |
+
"text/plain": [
|
19 |
+
"{'text': 'lizhen', 'age': '20', 'name': 'lizhen'}"
|
20 |
+
]
|
21 |
+
},
|
22 |
+
"execution_count": 32,
|
23 |
+
"metadata": {},
|
24 |
+
"output_type": "execute_result"
|
25 |
+
}
|
26 |
+
],
|
27 |
+
"source": [
|
28 |
+
"from langchain.chains import TransformChain\n",
|
29 |
+
"def transform_func(inputs: dict) -> dict:\n",
|
30 |
+
" print(inputs)\n",
|
31 |
+
" text = inputs[\"text\"]\n",
|
32 |
+
" shortened_text = text\n",
|
33 |
+
" print(shortened_text , text)\n",
|
34 |
+
" return {\"name\": shortened_text, \"age\":\"20\"}\n",
|
35 |
+
"\n",
|
36 |
+
"transform_chain = TransformChain(input_variables=[\"text\",\"age\"], output_variables=[\"name\", \"age\"], transform=transform_func)\n",
|
37 |
+
"transform_chain({\"text\":\"lizhen\", \"age\":\"18\"})"
|
38 |
+
]
|
39 |
+
}
|
40 |
+
],
|
41 |
+
"metadata": {
|
42 |
+
"kernelspec": {
|
43 |
+
"display_name": "base",
|
44 |
+
"language": "python",
|
45 |
+
"name": "python3"
|
46 |
+
},
|
47 |
+
"language_info": {
|
48 |
+
"codemirror_mode": {
|
49 |
+
"name": "ipython",
|
50 |
+
"version": 3
|
51 |
+
},
|
52 |
+
"file_extension": ".py",
|
53 |
+
"mimetype": "text/x-python",
|
54 |
+
"name": "python",
|
55 |
+
"nbconvert_exporter": "python",
|
56 |
+
"pygments_lexer": "ipython3",
|
57 |
+
"version": "3.11.3"
|
58 |
+
},
|
59 |
+
"orig_nbformat": 4
|
60 |
+
},
|
61 |
+
"nbformat": 4,
|
62 |
+
"nbformat_minor": 2
|
63 |
+
}
|
index_url_loader.ipynb
CHANGED
@@ -2,7 +2,7 @@
|
|
2 |
"cells": [
|
3 |
{
|
4 |
"cell_type": "code",
|
5 |
-
"execution_count":
|
6 |
"metadata": {},
|
7 |
"outputs": [],
|
8 |
"source": [
|
@@ -11,18 +11,19 @@
|
|
11 |
},
|
12 |
{
|
13 |
"cell_type": "code",
|
14 |
-
"execution_count":
|
15 |
"metadata": {},
|
16 |
"outputs": [],
|
17 |
"source": [
|
18 |
"urls = [\n",
|
19 |
-
" \"https://
|
|
|
20 |
"]"
|
21 |
]
|
22 |
},
|
23 |
{
|
24 |
"cell_type": "code",
|
25 |
-
"execution_count":
|
26 |
"metadata": {},
|
27 |
"outputs": [],
|
28 |
"source": [
|
@@ -31,16 +32,17 @@
|
|
31 |
},
|
32 |
{
|
33 |
"cell_type": "code",
|
34 |
-
"execution_count":
|
35 |
"metadata": {},
|
36 |
"outputs": [
|
37 |
{
|
38 |
"data": {
|
39 |
"text/plain": [
|
40 |
-
"[Document(page_content
|
|
|
41 |
]
|
42 |
},
|
43 |
-
"execution_count":
|
44 |
"metadata": {},
|
45 |
"output_type": "execute_result"
|
46 |
}
|
|
|
2 |
"cells": [
|
3 |
{
|
4 |
"cell_type": "code",
|
5 |
+
"execution_count": 5,
|
6 |
"metadata": {},
|
7 |
"outputs": [],
|
8 |
"source": [
|
|
|
11 |
},
|
12 |
{
|
13 |
"cell_type": "code",
|
14 |
+
"execution_count": 6,
|
15 |
"metadata": {},
|
16 |
"outputs": [],
|
17 |
"source": [
|
18 |
"urls = [\n",
|
19 |
+
" \"https://aiqicha.baidu.com/brand/detail?pid=xlTM-TogKuTwDXvk7672ZY75%2AN-J6d58lQmd&id=339490880&from=ps&pd=pb\",\n",
|
20 |
+
" \"https://www.bilibili.com/video/BV1gT411H7RT/?spm_id_from=333.1007.tianma.3-3-9.click\"\n",
|
21 |
"]"
|
22 |
]
|
23 |
},
|
24 |
{
|
25 |
"cell_type": "code",
|
26 |
+
"execution_count": 7,
|
27 |
"metadata": {},
|
28 |
"outputs": [],
|
29 |
"source": [
|
|
|
32 |
},
|
33 |
{
|
34 |
"cell_type": "code",
|
35 |
+
"execution_count": 8,
|
36 |
"metadata": {},
|
37 |
"outputs": [
|
38 |
{
|
39 |
"data": {
|
40 |
"text/plain": [
|
41 |
+
"[Document(page_content='查企业\\n\\n查老板\\n\\n查关系\\n\\n无匹配数据\\n\\n登录\\n\\n注册\\n\\n积分商城\\n\\n个人会员\\n\\n企业套餐\\n\\n企服商城\\n\\n工商服务\\n\\n财税服务\\n\\n知识产权\\n\\n资质许可\\n\\n\\n 应用\\n \\n\\n热门功能\\n\\n招投标查询\\n\\n批量受益人\\n\\n批量查询\\n\\n企业认领\\n\\n番番寻客宝\\n\\n高级搜索\\n\\n舆情监测\\n\\n专项查询\\n\\n查关系\\n\\n查老赖\\n\\n查商标\\n\\n地图查询\\n\\n新网快查\\n\\n空壳扫描\\n\\n查风险\\n\\n产品中心\\n\\n积分商城\\n\\n商家中心\\n\\n导出记录\\n\\n我的关注\\n\\n我的纠错\\n\\n用户反馈\\n\\n认证权益\\n\\n媒体合作\\n\\n更多功能即将上线,敬请期待…\\n\\n精灵在线网络技术(北京)有限公司\\n\\n企业发展\\n\\n品牌项目\\n\\n品牌项目详情\\n\\n百思不得姐\\n\\n英文名:-\\n\\n发源地:北京市\\n\\n创建年份:2011\\n\\n品牌信息\\n\\n所属企业\\n\\n精灵在线网络技术(北京)有限公司\\n\\n英文名\\n\\n发源地\\n\\n北京市\\n\\n创建年份\\n\\n2011\\n\\n咨询电话\\n\\n010-56182605\\n\\n传真\\n\\n官网\\n\\nhttp://www.spriteapp.com/\\n\\n邮箱\\n\\n公司地址\\n\\n北京市 海淀区 黑泉路8号 宝盛广场北区(B座)9层 查看地图\\n\\n品牌介绍\\n\\n精灵在线网络技术(北京)有限公司是一家移动互联网公司,成立于2011年,总部位于北京。经过多年的发展,现已拥有完善产品运营团队。团队里聚集了一批充满梦想、充满活力的精灵族,是一支踏实而又富有激情的创业团队。 精灵在线一直秉承着“简单、快乐、分享”的人文理念,在开放式的工作环境中让员工拥有家庭式的工作环境,自由布置。让每一位员工都能安逸、舒适的享受自己的工作。\\n\\n融资信息\\n 5\\n\\n序号\\n\\n发布日期\\n\\n融资轮次\\n\\n融资金额\\n\\n投资方\\n\\n2016-04-15\\n\\nC轮\\n\\n1000万元\\n\\n腾讯投资\\n\\n2015-06-30\\n\\nB+轮\\n\\n金额未知\\n\\n道彤投资\\n\\n2014-10-01\\n\\nB轮\\n\\n1200万元\\n\\n华映资本弘帆投资\\n\\n2013-07-01\\n\\nA轮\\n\\n2000万元\\n\\n华映资本\\n\\n2013-05-01\\n\\n天使轮\\n\\n900万元\\n\\n天娱数科\\n\\n企业品牌项目\\n 1\\n\\n序号\\n\\n项目名称\\n\\n最新融资轮次\\n\\n成立时间\\n\\n所属地\\n\\n项目简介\\n\\n百思不得姐\\n\\n2011\\n\\n北京市\\n\\n精灵在线网络技术(北京)有限公司是一家移���互联网公司,成立于2011年,总部位于北京。经过多年的发展,现已拥有完善产品运营团队。团队里聚集了一批充满梦想、充满活力的精灵族,是一支踏实而又富有激情的创业团队。 精灵在线一直秉承着“简单、快乐、分享”的人文理念,在开放式的工作环境中让员工拥有家庭式的工作环境,自由布置。让每一位员工都能安逸、舒适的享受自己的工作。\\n ... 
展开\\n\\n新闻资讯更多 >\\n\\n野风药业不断推进技术创新加强产品力,致力打造优质企业ZAKER新闻2023-04-27\\n\\n海能技术2023年第一季度营收5199.28万 有机元素分析系列收入增长挖贝网2023-04-27\\n\\n百洋股份2023年第一季度营收5.54亿 净利362.39万 出口价格及出口量、饲料产销量同比下降挖贝网2023-04-27\\n\\n4月26日尿素产业链情报生意社2023-04-27\\n\\n评论: 民生银行发布钢铁行业低碳转型金融服务方案365jia.cn2023-04-27\\n\\n信誉认证专区我要认证 >\\n\\n益加益(湖北)机械设备集团有限公司\\n 实地认证\\n \\n 金牌企业\\n \\n 益加益(湖北)机械设备集团有限公司主营业务是粮油机械制造和销售。\\n 企业大门办公楼\\n\\n成都泛美航空旅游中等职业技术学校有限公司\\n 实地认证\\n \\n 学校环境优美,教学设施设备齐全\\n 凯旋门第一教学楼\\n\\n成都泛美航空旅游中等职业技术学校有限公司\\n 实地认证\\n \\n 学校环境优美,教学设施设备齐全\\n 凯旋门第一教学楼\\n\\n北京现代汽车金融有限公司\\n \\n 实地认证\\n \\n 我们根植于中国市场,致力于成为一家具有全球化视野的金融公司。\\n 公司大厦公司前台\\n\\n北京现代汽车金融有限公司\\n 实地认证\\n \\n 我们根植于中国市场,致力于成为一家具有全球化视野的金融公司。\\n 公司大厦公司前台\\n\\n爱心人寿保险股份有限公司\\n \\n 实地认证\\n \\n 爱心人寿保险股份有限公司是经中国银保监会批准成立的全国性、综合性人寿保险公司。\\n 爱心所在大楼爱心前台\\n\\n爱心人寿保险股份有限公司\\n 实地认证\\n \\n 爱心人寿保险股份有限公司是经中国银保监会批准成立的全国性、综合性人寿保险公司。\\n 爱心所在大楼爱心前台\\n\\n武汉华日精密激光股份有限公司\\n \\n 实地认证\\n \\n 华日激光秉承“用激光工具改变生活”的愿景。\\n 超快产业基地激光展厅\\n\\n武汉华日精密激光股份有限公司\\n 实地认证\\n \\n 华日激光秉承“用激光工具改变生活”的愿景。\\n 超快产业基地激光展厅\\n\\n深圳市安保医疗科技股份有限公司\\n \\n 实地认证\\n \\n 安保医疗,专业从事急救和生命支持综合解决方案的医疗企业。\\n 办公大楼前台\\n\\n深圳市安保医疗科技股份有限公司\\n 实地认证\\n \\n 安保医疗,专业从事急救和生命支持综合解决方案的医疗企业。\\n 办公大楼前台\\n\\n北京科技职业学院\\n \\n 实地认证\\n \\n 北京科技职业学院办学方针育人为本,德育为先\\n 学校大门办公楼\\n\\n北京科技职业学院\\n 实地认证\\n \\n 北京科技职业学院办学方针育人为本,德育为先\\n 学校大门办公楼\\n\\n河南亚新窑炉有限公司\\n \\n 实地���证\\n \\n 河南亚新窑炉有限公司承建、设计移动式隧道窑和多拼式隧道窑。\\n 企业门头厂区内环境\\n\\n河南亚新窑炉有限公司\\n 实地认证\\n \\n 河南亚新窑炉有限公司承建、设计移动式隧道窑和多拼式隧道窑。\\n 企业门头厂区内环境\\n\\n武汉长乐园园林开发有限公司\\n \\n 实地认证\\n \\n 武汉长乐园是经湖北省民政厅批准,合法建立的一座现代化经营性公墓\\n 长乐园正门月亮湖\\n\\n武汉长乐园园林开发有限公司\\n 实地认证\\n \\n 武汉长乐园是经湖北省民政厅批准,合法建立的一座现代化经营性公墓\\n 长乐园正门月亮湖\\n\\n吉香居食品股份有限公司\\n \\n 实地认证\\n \\n 金牌企业\\n \\n 吉香居食品股份有限公司成立于2000年12月,总部位于四川眉山,主营泡菜、调味酱\\n 公司大门办公大楼\\n\\n吉香居食品股份有限公司\\n 实地认证\\n \\n 金牌企业\\n \\n 吉香居食品股份有限公司成立于2000年12月,总部位于四川眉山,主营泡菜、调味酱\\n 公司大门办公大楼\\n\\n益加益(湖北)机械设备集团有限公司\\n \\n 实地认证\\n \\n 金牌企业\\n \\n 益加益(湖北)机械设备集团有限公司主营业务是粮油机械制造和销售。\\n 企业大门办公楼\\n\\n益加益(湖北)机械设备集团有限公司\\n 实地认证\\n \\n 金牌企业\\n \\n 益加益(湖北)机械设备集团有限公司主营业务是粮油机械制造和销售。\\n 
企业大门办公楼\\n\\n成都泛美航空旅游中等职业技术学校有限公司\\n 实地认证\\n \\n 学校环境优美,教学设施设备齐全\\n 凯旋门第一教学楼\\n\\n成都泛美航空旅游中等职业技术学校有限公司\\n 实地认证\\n \\n 学校环境优美,教学设施设备齐全\\n 凯旋门第一教学楼\\n\\n北京现代汽车金融有限公司\\n \\n 实地认证\\n \\n 我们根植于中国市场,致力于成为一家具有全球化视野的金融公司。\\n 公司大厦公司前台\\n\\n您可能感兴趣的企业\\n\\n北京保利星数据光盘有限公司\\n\\n四川西南交大铁路发展股份有限公司\\n\\n浙江科瑞普电气有限公司\\n\\n深圳市通用科技有限公司\\n\\n南京我乐家居股份有限公司\\n\\n爱企查\\n\\n关于我们用户协议免责声明\\n\\n联系我们\\n\\n用户反馈:\\n 点此反馈\\n\\n商务合作:[email protected]\\n\\n数据来源\\n\\n国家企业信用信息公示系统信用中国中国裁判文书网\\n\\n中国执行信息公开网国家知识产权局商标局\\n\\n版权局民政部\\n\\n京ICP证030173号京网文|2013|0934-983号\\n Copyright ©2023 Baidu\\n\\nAPP\\n\\n公众号\\n\\n小程序\\n\\n百度小程序\\n\\n微信小程序\\n\\n反馈\\n\\n置顶\\n\\n提示\\n\\n退出后,需要重新登录,确认退出吗?\\n\\n取消\\n\\n确定', metadata={'source': 'https://aiqicha.baidu.com/brand/detail?pid=xlTM-TogKuTwDXvk7672ZY75%2AN-J6d58lQmd&id=339490880&from=ps&pd=pb'}),\n",
|
42 |
+
" Document(page_content='公开发布笔记\\n\\n首页\\n\\n番剧\\n\\n直播\\n\\n游戏中心\\n\\n会员购\\n\\n漫画\\n\\n赛事\\n\\n下载客户端\\n\\n\\n\\n登录\\n\\n\\n\\n大会员\\n\\n消息\\n\\n动态\\n\\n收藏\\n\\n历史\\n\\n创作中心\\n\\n\\n\\n投稿\\n\\n飞机上还能这么high?!\\n\\n全站排行榜最高第38名\\n\\n100.7万\\n\\n2160\\n\\n2023-04-24 15:23:14\\n\\n未经作者授权,禁止转载\\n\\n智慧的眼神\\n\\n飞��上还能这么high?!\\n\\n关注\\n\\n正在缓冲...\\n\\n已静音开播\\n\\n点击恢复音量\\n\\n登录 免费享高清视频\\n\\n试看30秒\\n\\n00:02\\n\\n08:38\\n\\n自动\\n\\n1080P 高码率大会员\\n\\n1080P 高清登录即享\\n\\n720P 高清登录即享\\n\\n480P 清晰登录即享\\n\\n360P 流畅\\n\\n自动(360P)\\n\\n倍速\\n\\n2.0x\\n\\n1.5x\\n\\n1.25x\\n\\n1.0x\\n\\n0.75x\\n\\n0.5x\\n\\n镜像画面\\n\\n洗脑循环\\n\\n自动开播\\n\\n更多播放设置\\n\\n播放方式\\n\\n自动切集\\n\\n播完暂停\\n\\n视频比例\\n\\n自动\\n\\n4:3\\n\\n16:9\\n\\n播放策略\\n\\n默认\\n\\nAV1\\n\\nHEVC\\n\\nAVC\\n\\n其他设置\\n\\n隐藏黑边\\n\\n关灯模式\\n\\n高能进度条\\n\\n打开《高能进度条》常驻\\n\\n已装填 1925 条弹幕\\n\\n按类型屏蔽\\n\\n滚动\\n\\n顶部\\n\\n底部\\n\\n彩色\\n\\n高级\\n\\n智能云屏蔽\\n\\n3级\\n\\n弹幕随屏幕缩放\\n\\n防挡字幕\\n\\n智能防挡弹幕\\n\\n添加屏蔽词\\n\\n同步屏蔽列表\\n\\n不透明度\\n\\n80%\\n\\n显示区域\\n\\n1/4\\n\\n半屏\\n\\n3/4\\n\\n不重叠\\n\\n不限\\n\\n半屏\\n\\n弹幕字号\\n\\n100%\\n\\n弹幕速度\\n\\n极慢\\n\\n适中\\n\\n极快\\n\\n适中\\n\\n高级设置\\n\\n全新【硬核会员弹幕模式】\\n\\n更多弹幕设置\\n\\n弹幕速度同步播放倍数\\n\\n弹幕字体\\n\\n黑体\\n\\n黑体\\n\\n宋体\\n\\n新宋体\\n\\n仿宋\\n\\n微软雅黑\\n\\n微软雅黑 Light\\n\\nNoto Sans DemiLight\\n\\nNoto Sans Regular\\n\\n粗体\\n\\n描边类型\\n\\n重墨\\n\\n描边\\n\\n45°投影\\n\\n恢复默认设置\\n\\n请先\\n\\n登录或\\n\\n注册\\n\\n弹幕礼仪\\n\\n发送\\n\\n优先使用播放器内置策略播放\\n\\n优先使用 AV1 编码视频播放\\n\\n优先使用 HEVC/H.265 编码视频播放\\n\\n优先使用 AVC/H.264 编码视频播放\\n\\n开启画中画\\n\\n宽屏模式\\n\\n网页全屏\\n\\n进入全屏 (f)\\n\\n关闭弹幕 (d)\\n\\n视频底部15%部分为空白保留区\\n\\n反馈\\n\\n7.8万\\n\\n5.1万\\n\\n4771\\n\\n飞机上还能这么high?!从00:00开始分享\\n 获取视频分享链接手机扫码观看/分享动态微信QQQQ空间微博贴吧嵌入代码 467\\n\\n用手机看\\n 稍后再看\\n\\n我的普通生活不普通飞机\\n 搞笑\\n 生活\\n 日常\\n 生活记录\\n 长沙\\n 女子推理社\\n 唠嗑\\n 白醋少女\\n 追一趟春日列车\\n\\n收起\\n\\n我的普通生活不普通\\n\\n飞机\\n\\n搞笑\\n\\n生活\\n\\n日常\\n\\n生活记录\\n\\n长沙\\n\\n\\n\\n女子推理社\\n\\n唠嗑\\n\\n白醋少女\\n\\n追一趟春日列车\\n\\n坎公过生日,送你120抽!还有这种好事?\\n\\n评论1574\\n\\n最热最新\\n\\n请先\\n\\n登录\\n\\n后发表评论 
(・ω・)\\n\\n发布\\n\\n不知落花为何物\\n\\n我是19年的老粉,见证了白醋从阿姨变成少女的全过程\\n\\n2023-04-25 00:27\\n\\n3242\\n\\n回复\\n\\n加入黑名单\\n\\n举报\\n\\n热评\\n\\n白醋少女\\n\\n2023-04-25 00:33\\n\\n36\\n\\n回复\\n\\n加入黑名单\\n\\n举报\\n\\n阿对的摆烂日常\\n\\n回复 @白醋少女 :好的老婆\\n\\n2023-04-25 08:14\\n\\n回复\\n\\n加入黑名单\\n\\n举报\\n\\n共51条回复,\\n\\n点击查看\\n\\n全谦\\n\\n知道为什么她不断更吗?因为她把哔哩哔哩当成朋友圈。\\n\\n2023-04-26 00:20\\n\\n15\\n\\n回复\\n\\n加入黑名单\\n\\n举报\\n\\n白醋少女\\n\\n那就恰恰证明你们都是我的朋友呀\\n\\n2023-04-26 00:32\\n\\n回复\\n\\n加入黑名单\\n\\n举报\\n\\n全谦\\n\\n回复 @白醋少女 :既然是朋友,那借我500\\n\\n15小时前\\n\\n回复\\n\\n加入黑名单\\n\\n举报\\n\\n共4条回复,\\n\\n点击查看\\n\\n登录后查看 1000+ 条评论\\n\\n白醋少女\\n\\n发消息\\n\\n工作vx:whiteboom7(不是本人)\\n一只努力逗你开心的少女(阿姨)在此!\\n\\n充电\\n\\n关注 162.9万\\n\\n弹幕列表\\n\\n弹幕列表\\n\\n屏蔽设定\\n\\n高级弹幕\\n\\n弹幕列表填充中...\\n\\n查看历史弹幕\\n\\n假期出门这么画,松弛感max!无眼影红唇妆\\n 杨豆奶\\n\\n接下来播放\\n 自动连播\\n\\n08:27\\n\\n如果你们来到武汉跟着我女朋友这样吃,那我觉得这件事情…泰裤辣!\\n\\n卑鄙的老曹\\n\\n01:17\\n\\n英文谐音梗 “我勒个去”!\\n\\njohnhuu\\n\\n00:29\\n\\n你怎么证明你是你自己???\\n\\n疆域阿力木\\n\\n02:22\\n\\n两分半速通西天\\n\\n巴特丶八仔\\n\\n02:05\\n\\n【女子推理社】 前台女孩诡笑吓懵戚薇 刚进公司诡事连连??\\n\\n云霞霜雪\\n\\n01:57\\n\\n狠人rapper泰裤辣\\n\\n刘背实\\n\\n00:20\\n\\n好家伙,果然最穷的还是我!\\n\\n娱乐有范OU\\n\\n01:56\\n\\n这么甜的兔子警官,谁不想帮她呢\\n\\n观影修罗场\\n\\n00:18\\n\\n下雨这件事,泰裤辣!\\n\\nso小马弟\\n\\n00:10\\n\\n天黑容易犯错 屋顶会着火\\n\\n地位极高硬气鑫\\n\\n02:29\\n\\n“近五年不会有超过它的作品了!”\\n\\nHongkongFilm\\n\\n01:59\\n\\n哪个正经人旅游和上班一样啊\\n\\n侯翠翠\\n\\n02:27\\n\\n仅18秒视频涨粉60万,带货2年掉粉50万,全网“最不上进”的主播\\n\\n星如雨湘\\n\\n00:16\\n\\n有些男孩子怎么回事啊!\\n\\n白醋少女\\n\\n01:06\\n\\n哥哥他凶我\\n\\n白婉晴\\n\\n00:22\\n\\n大学生了就不要穿的像个小孩一样了\\n\\n七颗猩猩QKXX\\n\\n00:29\\n\\n在飞机上配着帅气的保护壳玩Switch可太棒了呀!\\n\\nPoetic元学习up\\n\\n06:50\\n\\n【前方高能】“同志,你的信仰似乎最近动摇了!!”\\n\\n种花家的阿燃\\n\\n03:20\\n\\n五一调休这件事情,泰裤辣!\\n\\n文西与阿漆\\n\\n00:23\\n\\n再说就不礼貌了嗷!\\n\\n白醋少女\\n\\n展开\\n\\n一起来创作有趣表情包吧!!\\n\\n大家围观的直播\\n\\nLIVE她好像知道自己很可爱糖醋娜美611\\n\\n客服\\n\\n赛事库\\n\\n课堂\\n\\n2021拜年纪', metadata={'source': 'https://www.bilibili.com/video/BV1gT411H7RT/?spm_id_from=333.1007.tianma.3-3-9.click'})]"
|
43 |
]
|
44 |
},
|
45 |
+
"execution_count": 8,
|
46 |
"metadata": {},
|
47 |
"output_type": "execute_result"
|
48 |
}
|
memory_kg.ipynb
ADDED
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 21,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"from langchain import OpenAI\n",
|
10 |
+
"from langchain.memory import ConversationKGMemory\n",
|
11 |
+
"\n",
|
12 |
+
"\n",
|
13 |
+
"llm = OpenAI(temperature=0)\n",
|
14 |
+
"from langchain.prompts.prompt import PromptTemplate\n",
|
15 |
+
"from langchain.chains import ConversationChain\n",
|
16 |
+
"\n",
|
17 |
+
"template = \"\"\"The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. \n",
|
18 |
+
"If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the \"Relevant Information\" section and does not hallucinate.\n",
|
19 |
+
"\n",
|
20 |
+
"Relevant Information:\n",
|
21 |
+
"\n",
|
22 |
+
"{history}\n",
|
23 |
+
"\n",
|
24 |
+
"Conversation:\n",
|
25 |
+
"Human: {input}\n",
|
26 |
+
"AI:\"\"\"\n",
|
27 |
+
"prompt = PromptTemplate(\n",
|
28 |
+
" input_variables=[\"history\", \"input\"], template=template\n",
|
29 |
+
")\n",
|
30 |
+
"conversation_with_kg = ConversationChain(\n",
|
31 |
+
" llm=llm, \n",
|
32 |
+
" verbose=True, \n",
|
33 |
+
" prompt=prompt,\n",
|
34 |
+
" memory=ConversationKGMemory(llm=llm)\n",
|
35 |
+
")"
|
36 |
+
]
|
37 |
+
},
|
38 |
+
{
|
39 |
+
"cell_type": "code",
|
40 |
+
"execution_count": 22,
|
41 |
+
"metadata": {},
|
42 |
+
"outputs": [
|
43 |
+
{
|
44 |
+
"name": "stdout",
|
45 |
+
"output_type": "stream",
|
46 |
+
"text": [
|
47 |
+
"\n",
|
48 |
+
"\n",
|
49 |
+
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
50 |
+
"Prompt after formatting:\n",
|
51 |
+
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. \n",
|
52 |
+
"If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the \"Relevant Information\" section and does not hallucinate.\n",
|
53 |
+
"\n",
|
54 |
+
"Relevant Information:\n",
|
55 |
+
"\n",
|
56 |
+
"\n",
|
57 |
+
"\n",
|
58 |
+
"Conversation:\n",
|
59 |
+
"Human: 你好\n",
|
60 |
+
"AI:\u001b[0m\n",
|
61 |
+
"\n",
|
62 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
63 |
+
]
|
64 |
+
},
|
65 |
+
{
|
66 |
+
"data": {
|
67 |
+
"text/plain": [
|
68 |
+
"' 你好!很高兴见到你。我是一个AI,我可以回答你的问题。你想问我什么?'"
|
69 |
+
]
|
70 |
+
},
|
71 |
+
"execution_count": 22,
|
72 |
+
"metadata": {},
|
73 |
+
"output_type": "execute_result"
|
74 |
+
}
|
75 |
+
],
|
76 |
+
"source": [
|
77 |
+
"conversation_with_kg.predict(input=\"你好\")"
|
78 |
+
]
|
79 |
+
},
|
80 |
+
{
|
81 |
+
"cell_type": "code",
|
82 |
+
"execution_count": 23,
|
83 |
+
"metadata": {},
|
84 |
+
"outputs": [
|
85 |
+
{
|
86 |
+
"name": "stdout",
|
87 |
+
"output_type": "stream",
|
88 |
+
"text": [
|
89 |
+
"\n",
|
90 |
+
"\n",
|
91 |
+
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
92 |
+
"Prompt after formatting:\n",
|
93 |
+
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. \n",
|
94 |
+
"If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the \"Relevant Information\" section and does not hallucinate.\n",
|
95 |
+
"\n",
|
96 |
+
"Relevant Information:\n",
|
97 |
+
"\n",
|
98 |
+
"\n",
|
99 |
+
"\n",
|
100 |
+
"Conversation:\n",
|
101 |
+
"Human: 我叫李振,我正在帮助张三写作业,他是一名小学生。\n",
|
102 |
+
"AI:\u001b[0m\n",
|
103 |
+
"\n",
|
104 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
105 |
+
]
|
106 |
+
},
|
107 |
+
{
|
108 |
+
"data": {
|
109 |
+
"text/plain": [
|
110 |
+
"' 哦,你好李振!很高兴认识你。我知道张三是一名小学生,他有多大了?'"
|
111 |
+
]
|
112 |
+
},
|
113 |
+
"execution_count": 23,
|
114 |
+
"metadata": {},
|
115 |
+
"output_type": "execute_result"
|
116 |
+
}
|
117 |
+
],
|
118 |
+
"source": [
|
119 |
+
"conversation_with_kg.predict(input=\"我叫李振,我正在帮助张三写作业,他是一名小学生。\")"
|
120 |
+
]
|
121 |
+
},
|
122 |
+
{
|
123 |
+
"cell_type": "code",
|
124 |
+
"execution_count": 24,
|
125 |
+
"metadata": {},
|
126 |
+
"outputs": [
|
127 |
+
{
|
128 |
+
"name": "stdout",
|
129 |
+
"output_type": "stream",
|
130 |
+
"text": [
|
131 |
+
"\n",
|
132 |
+
"\n",
|
133 |
+
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
134 |
+
"Prompt after formatting:\n",
|
135 |
+
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. \n",
|
136 |
+
"If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the \"Relevant Information\" section and does not hallucinate.\n",
|
137 |
+
"\n",
|
138 |
+
"Relevant Information:\n",
|
139 |
+
"\n",
|
140 |
+
"On 张三: 张三 是 一名小学生.\n",
|
141 |
+
"\n",
|
142 |
+
"Conversation:\n",
|
143 |
+
"Human: 关于张三,你了解多少\n",
|
144 |
+
"AI:\u001b[0m\n",
|
145 |
+
"\n",
|
146 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
147 |
+
]
|
148 |
+
},
|
149 |
+
{
|
150 |
+
"data": {
|
151 |
+
"text/plain": [
|
152 |
+
"' 我知道张三是一名小学生。'"
|
153 |
+
]
|
154 |
+
},
|
155 |
+
"execution_count": 24,
|
156 |
+
"metadata": {},
|
157 |
+
"output_type": "execute_result"
|
158 |
+
}
|
159 |
+
],
|
160 |
+
"source": [
|
161 |
+
"conversation_with_kg.predict(input=\"关于张三,你了解多少\")"
|
162 |
+
]
|
163 |
+
}
|
164 |
+
],
|
165 |
+
"metadata": {
|
166 |
+
"kernelspec": {
|
167 |
+
"display_name": "base",
|
168 |
+
"language": "python",
|
169 |
+
"name": "python3"
|
170 |
+
},
|
171 |
+
"language_info": {
|
172 |
+
"codemirror_mode": {
|
173 |
+
"name": "ipython",
|
174 |
+
"version": 3
|
175 |
+
},
|
176 |
+
"file_extension": ".py",
|
177 |
+
"mimetype": "text/x-python",
|
178 |
+
"name": "python",
|
179 |
+
"nbconvert_exporter": "python",
|
180 |
+
"pygments_lexer": "ipython3",
|
181 |
+
"version": "3.10.10"
|
182 |
+
},
|
183 |
+
"orig_nbformat": 4
|
184 |
+
},
|
185 |
+
"nbformat": 4,
|
186 |
+
"nbformat_minor": 2
|
187 |
+
}
|
memory_predict_with_history.ipynb
ADDED
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [
|
8 |
+
{
|
9 |
+
"name": "stdout",
|
10 |
+
"output_type": "stream",
|
11 |
+
"text": [
|
12 |
+
"\n",
|
13 |
+
"\n",
|
14 |
+
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
15 |
+
"Prompt after formatting:\n",
|
16 |
+
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
17 |
+
"\n",
|
18 |
+
"Current conversation:\n",
|
19 |
+
"\n",
|
20 |
+
"Human: 你好\n",
|
21 |
+
"AI:\u001b[0m\n",
|
22 |
+
"\n",
|
23 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
24 |
+
]
|
25 |
+
},
|
26 |
+
{
|
27 |
+
"data": {
|
28 |
+
"text/plain": [
|
29 |
+
"' 你好!很高兴见到你!我是一个AI,我可以为你提供有关我所在地区的信息。你想知道什么?'"
|
30 |
+
]
|
31 |
+
},
|
32 |
+
"execution_count": 1,
|
33 |
+
"metadata": {},
|
34 |
+
"output_type": "execute_result"
|
35 |
+
}
|
36 |
+
],
|
37 |
+
"source": [
|
38 |
+
"from langchain.llms import OpenAI\n",
|
39 |
+
"from langchain.chains import ConversationChain\n",
|
40 |
+
"from langchain.memory import ConversationBufferMemory\n",
|
41 |
+
"\n",
|
42 |
+
"llm = OpenAI(temperature=0.5)\n",
|
43 |
+
"chain = ConversationChain(\n",
|
44 |
+
" llm=llm,\n",
|
45 |
+
" verbose=True,\n",
|
46 |
+
" memory=ConversationBufferMemory(ai_prefix=\"xiaobang\")\n",
|
47 |
+
")\n",
|
48 |
+
"chain.predict(input=\"你好\")"
|
49 |
+
]
|
50 |
+
},
|
51 |
+
{
|
52 |
+
"cell_type": "code",
|
53 |
+
"execution_count": 2,
|
54 |
+
"metadata": {},
|
55 |
+
"outputs": [
|
56 |
+
{
|
57 |
+
"name": "stdout",
|
58 |
+
"output_type": "stream",
|
59 |
+
"text": [
|
60 |
+
"\n",
|
61 |
+
"\n",
|
62 |
+
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
63 |
+
"Prompt after formatting:\n",
|
64 |
+
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
65 |
+
"\n",
|
66 |
+
"Current conversation:\n",
|
67 |
+
"Human: 你好\n",
|
68 |
+
"xiaobang: 你好!很高兴见到你!我是一个AI,我可以为你提供有关我所在地区的信息。你想知道什么?\n",
|
69 |
+
"Human: 北京天气怎么样\n",
|
70 |
+
"AI:\u001b[0m\n",
|
71 |
+
"\n",
|
72 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
73 |
+
]
|
74 |
+
},
|
75 |
+
{
|
76 |
+
"data": {
|
77 |
+
"text/plain": [
|
78 |
+
"' 北京的天气现在很好!今天是晴天,温度约为20摄氏度,并且有轻微的风。'"
|
79 |
+
]
|
80 |
+
},
|
81 |
+
"execution_count": 2,
|
82 |
+
"metadata": {},
|
83 |
+
"output_type": "execute_result"
|
84 |
+
}
|
85 |
+
],
|
86 |
+
"source": [
|
87 |
+
"chain.predict(input=\"北京天气怎么样\")"
|
88 |
+
]
|
89 |
+
},
|
90 |
+
{
|
91 |
+
"cell_type": "code",
|
92 |
+
"execution_count": 3,
|
93 |
+
"metadata": {},
|
94 |
+
"outputs": [
|
95 |
+
{
|
96 |
+
"name": "stdout",
|
97 |
+
"output_type": "stream",
|
98 |
+
"text": [
|
99 |
+
"\n",
|
100 |
+
"\n",
|
101 |
+
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
102 |
+
"Prompt after formatting:\n",
|
103 |
+
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
104 |
+
"\n",
|
105 |
+
"Current conversation:\n",
|
106 |
+
"Human: 你好\n",
|
107 |
+
"xiaobang: 你好!很高兴见到你!我是一个AI,我可以为你提供有关我所在地区的信息。你想知道什么?\n",
|
108 |
+
"Human: 北京天气怎么样\n",
|
109 |
+
"xiaobang: 北京的天气现在很好!今天是晴天,温度约为20摄氏度,并且有轻微的风。\n",
|
110 |
+
"Human: 天津呢\n",
|
111 |
+
"AI:\u001b[0m\n",
|
112 |
+
"\n",
|
113 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
114 |
+
]
|
115 |
+
},
|
116 |
+
{
|
117 |
+
"data": {
|
118 |
+
"text/plain": [
|
119 |
+
"' 对不起,我不知道天津的天气情况。'"
|
120 |
+
]
|
121 |
+
},
|
122 |
+
"execution_count": 3,
|
123 |
+
"metadata": {},
|
124 |
+
"output_type": "execute_result"
|
125 |
+
}
|
126 |
+
],
|
127 |
+
"source": [
|
128 |
+
"chain.predict(input=\"天津呢\")"
|
129 |
+
]
|
130 |
+
}
|
131 |
+
],
|
132 |
+
"metadata": {
|
133 |
+
"kernelspec": {
|
134 |
+
"display_name": "base",
|
135 |
+
"language": "python",
|
136 |
+
"name": "python3"
|
137 |
+
},
|
138 |
+
"language_info": {
|
139 |
+
"codemirror_mode": {
|
140 |
+
"name": "ipython",
|
141 |
+
"version": 3
|
142 |
+
},
|
143 |
+
"file_extension": ".py",
|
144 |
+
"mimetype": "text/x-python",
|
145 |
+
"name": "python",
|
146 |
+
"nbconvert_exporter": "python",
|
147 |
+
"pygments_lexer": "ipython3",
|
148 |
+
"version": "3.10.10"
|
149 |
+
},
|
150 |
+
"orig_nbformat": 4
|
151 |
+
},
|
152 |
+
"nbformat": 4,
|
153 |
+
"nbformat_minor": 2
|
154 |
+
}
|
memory_start.ipynb
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 3,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [
|
8 |
+
{
|
9 |
+
"data": {
|
10 |
+
"text/plain": [
|
11 |
+
"{'history': [HumanMessage(content='你好', additional_kwargs={}),\n",
|
12 |
+
" AIMessage(content='您好,有什么可以帮您?', additional_kwargs={})]}"
|
13 |
+
]
|
14 |
+
},
|
15 |
+
"execution_count": 3,
|
16 |
+
"metadata": {},
|
17 |
+
"output_type": "execute_result"
|
18 |
+
}
|
19 |
+
],
|
20 |
+
"source": [
|
21 |
+
"from langchain.memory import ConversationBufferMemory\n",
|
22 |
+
"\n",
|
23 |
+
"memory = ConversationBufferMemory(return_messages=True)\n",
|
24 |
+
"# memory = ConversationBufferMemory()\n",
|
25 |
+
"\n",
|
26 |
+
"memory.chat_memory.add_user_message(\"你好\")\n",
|
27 |
+
"memory.chat_memory.add_ai_message(\"您好,有什么可以帮您?\")\n",
|
28 |
+
"memory.load_memory_variables({})"
|
29 |
+
]
|
30 |
+
}
|
31 |
+
],
|
32 |
+
"metadata": {
|
33 |
+
"kernelspec": {
|
34 |
+
"display_name": "base",
|
35 |
+
"language": "python",
|
36 |
+
"name": "python3"
|
37 |
+
},
|
38 |
+
"language_info": {
|
39 |
+
"codemirror_mode": {
|
40 |
+
"name": "ipython",
|
41 |
+
"version": 3
|
42 |
+
},
|
43 |
+
"file_extension": ".py",
|
44 |
+
"mimetype": "text/x-python",
|
45 |
+
"name": "python",
|
46 |
+
"nbconvert_exporter": "python",
|
47 |
+
"pygments_lexer": "ipython3",
|
48 |
+
"version": "3.10.10"
|
49 |
+
},
|
50 |
+
"orig_nbformat": 4
|
51 |
+
},
|
52 |
+
"nbformat": 4,
|
53 |
+
"nbformat_minor": 2
|
54 |
+
}
|
memory_summary_buffer.ipynb
ADDED
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [
|
8 |
+
{
|
9 |
+
"name": "stdout",
|
10 |
+
"output_type": "stream",
|
11 |
+
"text": [
|
12 |
+
"\n",
|
13 |
+
"\n",
|
14 |
+
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
15 |
+
"Prompt after formatting:\n",
|
16 |
+
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
17 |
+
"\n",
|
18 |
+
"Current conversation:\n",
|
19 |
+
"\n",
|
20 |
+
"Human: 你好\n",
|
21 |
+
"AI:\u001b[0m\n",
|
22 |
+
"\n",
|
23 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
24 |
+
]
|
25 |
+
},
|
26 |
+
{
|
27 |
+
"data": {
|
28 |
+
"text/plain": [
|
29 |
+
"' 你好!很高兴见到你!我是一个智能AI,我可以回答你的问题,或者我们可以聊聊天?'"
|
30 |
+
]
|
31 |
+
},
|
32 |
+
"execution_count": 1,
|
33 |
+
"metadata": {},
|
34 |
+
"output_type": "execute_result"
|
35 |
+
}
|
36 |
+
],
|
37 |
+
"source": [
|
38 |
+
"from langchain.llms import OpenAI\n",
|
39 |
+
"from langchain.chains import ConversationChain\n",
|
40 |
+
"from langchain.memory import ConversationSummaryBufferMemory\n",
|
41 |
+
"\n",
|
42 |
+
"llm = OpenAI(temperature=0)\n",
|
43 |
+
"conversation_with_summary = ConversationChain(\n",
|
44 |
+
" llm=llm, \n",
|
45 |
+
" memory=ConversationSummaryBufferMemory(llm=OpenAI(), max_token_limit=10, ai_prefix=\"小帮小帮\"),\n",
|
46 |
+
" verbose=True\n",
|
47 |
+
")\n",
|
48 |
+
"conversation_with_summary.predict(input=\"你好\")"
|
49 |
+
]
|
50 |
+
},
|
51 |
+
{
|
52 |
+
"cell_type": "code",
|
53 |
+
"execution_count": 2,
|
54 |
+
"metadata": {},
|
55 |
+
"outputs": [
|
56 |
+
{
|
57 |
+
"name": "stdout",
|
58 |
+
"output_type": "stream",
|
59 |
+
"text": [
|
60 |
+
"\n",
|
61 |
+
"\n",
|
62 |
+
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
63 |
+
"Prompt after formatting:\n",
|
64 |
+
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
65 |
+
"\n",
|
66 |
+
"Current conversation:\n",
|
67 |
+
"System: \n",
|
68 |
+
"The human greeted the AI and the AI replied enthusiastically, introducing themselves as a capable AI that could answer questions or engage in conversation.\n",
|
69 |
+
"Human: 你知道一元二次方程吗\n",
|
70 |
+
"AI:\u001b[0m\n",
|
71 |
+
"\n",
|
72 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
73 |
+
]
|
74 |
+
},
|
75 |
+
{
|
76 |
+
"data": {
|
77 |
+
"text/plain": [
|
78 |
+
"' 是的,我知道一元二次方程。它是一个有两个未知数的方程,可以用来求解一元二次不等式。它的一般形式是ax² + bx + c = 0,其中a,b和c是常数,而x是未知数。'"
|
79 |
+
]
|
80 |
+
},
|
81 |
+
"execution_count": 2,
|
82 |
+
"metadata": {},
|
83 |
+
"output_type": "execute_result"
|
84 |
+
}
|
85 |
+
],
|
86 |
+
"source": [
|
87 |
+
"conversation_with_summary.predict(input=\"你知道一元二次方程吗\")"
|
88 |
+
]
|
89 |
+
},
|
90 |
+
{
|
91 |
+
"cell_type": "code",
|
92 |
+
"execution_count": 3,
|
93 |
+
"metadata": {},
|
94 |
+
"outputs": [
|
95 |
+
{
|
96 |
+
"name": "stdout",
|
97 |
+
"output_type": "stream",
|
98 |
+
"text": [
|
99 |
+
"\n",
|
100 |
+
"\n",
|
101 |
+
"\u001b[1m> Entering new ConversationChain chain...\u001b[0m\n",
|
102 |
+
"Prompt after formatting:\n",
|
103 |
+
"\u001b[32;1m\u001b[1;3mThe following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.\n",
|
104 |
+
"\n",
|
105 |
+
"Current conversation:\n",
|
106 |
+
"System: \n",
|
107 |
+
"The human greeted the AI and the AI replied enthusiastically, introducing themselves as a capable AI that could answer questions or engage in conversation. When asked if it knew about quadratic equations, the AI confirmed that it did, describing that it is an equation with two unknowns that can be used to solve a quadratic inequality. It's general form is ax² + bx + c = 0, where a, b, and c are constants and x is the unknown.\n",
|
108 |
+
"Human: 怎么求解呢\n",
|
109 |
+
"AI:\u001b[0m\n",
|
110 |
+
"\n",
|
111 |
+
"\u001b[1m> Finished chain.\u001b[0m\n"
|
112 |
+
]
|
113 |
+
},
|
114 |
+
{
|
115 |
+
"data": {
|
116 |
+
"text/plain": [
|
117 |
+
"' 求解一元二次方程的方法有多种,其中最常用的是利用“求根公式”,即:x1,2 = -b ± √(b² - 4ac) / 2a,其中a, b, c分别为一元二次方程ax² + bx + c = 0中的系数。'"
|
118 |
+
]
|
119 |
+
},
|
120 |
+
"execution_count": 3,
|
121 |
+
"metadata": {},
|
122 |
+
"output_type": "execute_result"
|
123 |
+
}
|
124 |
+
],
|
125 |
+
"source": [
|
126 |
+
"conversation_with_summary.predict(input=\"怎么求解呢\")"
|
127 |
+
]
|
128 |
+
}
|
129 |
+
],
|
130 |
+
"metadata": {
|
131 |
+
"kernelspec": {
|
132 |
+
"display_name": "base",
|
133 |
+
"language": "python",
|
134 |
+
"name": "python3"
|
135 |
+
},
|
136 |
+
"language_info": {
|
137 |
+
"codemirror_mode": {
|
138 |
+
"name": "ipython",
|
139 |
+
"version": 3
|
140 |
+
},
|
141 |
+
"file_extension": ".py",
|
142 |
+
"mimetype": "text/x-python",
|
143 |
+
"name": "python",
|
144 |
+
"nbconvert_exporter": "python",
|
145 |
+
"pygments_lexer": "ipython3",
|
146 |
+
"version": "3.10.10"
|
147 |
+
},
|
148 |
+
"orig_nbformat": 4
|
149 |
+
},
|
150 |
+
"nbformat": 4,
|
151 |
+
"nbformat_minor": 2
|
152 |
+
}
|
retriever_chatgpt.ipynb
ADDED
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"metadata": {},
|
7 |
+
"outputs": [],
|
8 |
+
"source": [
|
9 |
+
"# STEP 1: Load\n",
|
10 |
+
"\n",
|
11 |
+
"# Load documents using LangChain's DocumentLoaders\n",
|
12 |
+
"# This is from https://langchain.readthedocs.io/en/latest/modules/document_loaders/examples/csv.html\n",
|
13 |
+
"\n",
|
14 |
+
"from langchain.document_loaders.csv_loader import CSVLoader\n",
|
15 |
+
"loader = CSVLoader(file_path='.inner/xbxb.csv')\n",
|
16 |
+
"data = loader.load()\n",
|
17 |
+
"\n",
|
18 |
+
"\n",
|
19 |
+
"# STEP 2: Convert\n",
|
20 |
+
"\n",
|
21 |
+
"# Convert Document to format expected by https://github.com/openai/chatgpt-retrieval-plugin\n",
|
22 |
+
"from typing import List\n",
|
23 |
+
"from langchain.docstore.document import Document\n",
|
24 |
+
"import json\n",
|
25 |
+
"\n",
|
26 |
+
"def write_json(path: str, documents: List[Document])-> None:\n",
|
27 |
+
" results = [{\"text\": doc.page_content} for doc in documents]\n",
|
28 |
+
" with open(path, \"w\") as f:\n",
|
29 |
+
" json.dump(results, f, indent=2)\n",
|
30 |
+
"\n",
|
31 |
+
"write_json(\".inner/foo.json\", data)\n",
|
32 |
+
"\n",
|
33 |
+
"# STEP 3: Use\n",
|
34 |
+
"\n",
|
35 |
+
"# Ingest this as you would any other json file in https://github.com/openai/chatgpt-retrieval-plugin/tree/main/scripts/process_json"
|
36 |
+
]
|
37 |
+
},
|
38 |
+
{
|
39 |
+
"cell_type": "code",
|
40 |
+
"execution_count": 3,
|
41 |
+
"metadata": {},
|
42 |
+
"outputs": [
|
43 |
+
{
|
44 |
+
"ename": "ConnectionError",
|
45 |
+
"evalue": "HTTPConnectionPool(host='127.0.0.1', port=8080): Max retries exceeded with url: /query (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x103f8fa30>: Failed to establish a new connection: [Errno 61] Connection refused'))",
|
46 |
+
"output_type": "error",
|
47 |
+
"traceback": [
|
48 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
49 |
+
"\u001b[0;31mConnectionRefusedError\u001b[0m Traceback (most recent call last)",
|
50 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/urllib3/connection.py:174\u001b[0m, in \u001b[0;36mHTTPConnection._new_conn\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 173\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 174\u001b[0m conn \u001b[39m=\u001b[39m connection\u001b[39m.\u001b[39;49mcreate_connection(\n\u001b[1;32m 175\u001b[0m (\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_dns_host, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mport), \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mtimeout, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mextra_kw\n\u001b[1;32m 176\u001b[0m )\n\u001b[1;32m 178\u001b[0m \u001b[39mexcept\u001b[39;00m SocketTimeout:\n",
|
51 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/urllib3/util/connection.py:95\u001b[0m, in \u001b[0;36mcreate_connection\u001b[0;34m(address, timeout, source_address, socket_options)\u001b[0m\n\u001b[1;32m 94\u001b[0m \u001b[39mif\u001b[39;00m err \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m---> 95\u001b[0m \u001b[39mraise\u001b[39;00m err\n\u001b[1;32m 97\u001b[0m \u001b[39mraise\u001b[39;00m socket\u001b[39m.\u001b[39merror(\u001b[39m\"\u001b[39m\u001b[39mgetaddrinfo returns an empty list\u001b[39m\u001b[39m\"\u001b[39m)\n",
|
52 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/urllib3/util/connection.py:85\u001b[0m, in \u001b[0;36mcreate_connection\u001b[0;34m(address, timeout, source_address, socket_options)\u001b[0m\n\u001b[1;32m 84\u001b[0m sock\u001b[39m.\u001b[39mbind(source_address)\n\u001b[0;32m---> 85\u001b[0m sock\u001b[39m.\u001b[39;49mconnect(sa)\n\u001b[1;32m 86\u001b[0m \u001b[39mreturn\u001b[39;00m sock\n",
|
53 |
+
"\u001b[0;31mConnectionRefusedError\u001b[0m: [Errno 61] Connection refused",
|
54 |
+
"\nDuring handling of the above exception, another exception occurred:\n",
|
55 |
+
"\u001b[0;31mNewConnectionError\u001b[0m Traceback (most recent call last)",
|
56 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/urllib3/connectionpool.py:703\u001b[0m, in \u001b[0;36mHTTPConnectionPool.urlopen\u001b[0;34m(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)\u001b[0m\n\u001b[1;32m 702\u001b[0m \u001b[39m# Make the request on the httplib connection object.\u001b[39;00m\n\u001b[0;32m--> 703\u001b[0m httplib_response \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_make_request(\n\u001b[1;32m 704\u001b[0m conn,\n\u001b[1;32m 705\u001b[0m method,\n\u001b[1;32m 706\u001b[0m url,\n\u001b[1;32m 707\u001b[0m timeout\u001b[39m=\u001b[39;49mtimeout_obj,\n\u001b[1;32m 708\u001b[0m body\u001b[39m=\u001b[39;49mbody,\n\u001b[1;32m 709\u001b[0m headers\u001b[39m=\u001b[39;49mheaders,\n\u001b[1;32m 710\u001b[0m chunked\u001b[39m=\u001b[39;49mchunked,\n\u001b[1;32m 711\u001b[0m )\n\u001b[1;32m 713\u001b[0m \u001b[39m# If we're going to release the connection in ``finally:``, then\u001b[39;00m\n\u001b[1;32m 714\u001b[0m \u001b[39m# the response doesn't need to know about the connection. Otherwise\u001b[39;00m\n\u001b[1;32m 715\u001b[0m \u001b[39m# it will also try to release it and we'll have a double-release\u001b[39;00m\n\u001b[1;32m 716\u001b[0m \u001b[39m# mess.\u001b[39;00m\n",
|
57 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/urllib3/connectionpool.py:398\u001b[0m, in \u001b[0;36mHTTPConnectionPool._make_request\u001b[0;34m(self, conn, method, url, timeout, chunked, **httplib_request_kw)\u001b[0m\n\u001b[1;32m 397\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m--> 398\u001b[0m conn\u001b[39m.\u001b[39;49mrequest(method, url, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mhttplib_request_kw)\n\u001b[1;32m 400\u001b[0m \u001b[39m# We are swallowing BrokenPipeError (errno.EPIPE) since the server is\u001b[39;00m\n\u001b[1;32m 401\u001b[0m \u001b[39m# legitimately able to close the connection after sending a valid response.\u001b[39;00m\n\u001b[1;32m 402\u001b[0m \u001b[39m# With this behaviour, the received response is still readable.\u001b[39;00m\n",
|
58 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/urllib3/connection.py:244\u001b[0m, in \u001b[0;36mHTTPConnection.request\u001b[0;34m(self, method, url, body, headers)\u001b[0m\n\u001b[1;32m 243\u001b[0m headers[\u001b[39m\"\u001b[39m\u001b[39mUser-Agent\u001b[39m\u001b[39m\"\u001b[39m] \u001b[39m=\u001b[39m _get_default_user_agent()\n\u001b[0;32m--> 244\u001b[0m \u001b[39msuper\u001b[39;49m(HTTPConnection, \u001b[39mself\u001b[39;49m)\u001b[39m.\u001b[39;49mrequest(method, url, body\u001b[39m=\u001b[39;49mbody, headers\u001b[39m=\u001b[39;49mheaders)\n",
|
59 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/http/client.py:1282\u001b[0m, in \u001b[0;36mHTTPConnection.request\u001b[0;34m(self, method, url, body, headers, encode_chunked)\u001b[0m\n\u001b[1;32m 1281\u001b[0m \u001b[39m\u001b[39m\u001b[39m\"\"\"Send a complete request to the server.\"\"\"\u001b[39;00m\n\u001b[0;32m-> 1282\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_send_request(method, url, body, headers, encode_chunked)\n",
|
60 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/http/client.py:1328\u001b[0m, in \u001b[0;36mHTTPConnection._send_request\u001b[0;34m(self, method, url, body, headers, encode_chunked)\u001b[0m\n\u001b[1;32m 1327\u001b[0m body \u001b[39m=\u001b[39m _encode(body, \u001b[39m'\u001b[39m\u001b[39mbody\u001b[39m\u001b[39m'\u001b[39m)\n\u001b[0;32m-> 1328\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mendheaders(body, encode_chunked\u001b[39m=\u001b[39;49mencode_chunked)\n",
|
61 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/http/client.py:1277\u001b[0m, in \u001b[0;36mHTTPConnection.endheaders\u001b[0;34m(self, message_body, encode_chunked)\u001b[0m\n\u001b[1;32m 1276\u001b[0m \u001b[39mraise\u001b[39;00m CannotSendHeader()\n\u001b[0;32m-> 1277\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_send_output(message_body, encode_chunked\u001b[39m=\u001b[39;49mencode_chunked)\n",
|
62 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/http/client.py:1037\u001b[0m, in \u001b[0;36mHTTPConnection._send_output\u001b[0;34m(self, message_body, encode_chunked)\u001b[0m\n\u001b[1;32m 1036\u001b[0m \u001b[39mdel\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_buffer[:]\n\u001b[0;32m-> 1037\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49msend(msg)\n\u001b[1;32m 1039\u001b[0m \u001b[39mif\u001b[39;00m message_body \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m 1040\u001b[0m \n\u001b[1;32m 1041\u001b[0m \u001b[39m# create a consistent interface to message_body\u001b[39;00m\n",
|
63 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/http/client.py:975\u001b[0m, in \u001b[0;36mHTTPConnection.send\u001b[0;34m(self, data)\u001b[0m\n\u001b[1;32m 974\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mauto_open:\n\u001b[0;32m--> 975\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mconnect()\n\u001b[1;32m 976\u001b[0m \u001b[39melse\u001b[39;00m:\n",
|
64 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/urllib3/connection.py:205\u001b[0m, in \u001b[0;36mHTTPConnection.connect\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 204\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mconnect\u001b[39m(\u001b[39mself\u001b[39m):\n\u001b[0;32m--> 205\u001b[0m conn \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_new_conn()\n\u001b[1;32m 206\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_prepare_conn(conn)\n",
|
65 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/urllib3/connection.py:186\u001b[0m, in \u001b[0;36mHTTPConnection._new_conn\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 185\u001b[0m \u001b[39mexcept\u001b[39;00m SocketError \u001b[39mas\u001b[39;00m e:\n\u001b[0;32m--> 186\u001b[0m \u001b[39mraise\u001b[39;00m NewConnectionError(\n\u001b[1;32m 187\u001b[0m \u001b[39mself\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39mFailed to establish a new connection: \u001b[39m\u001b[39m%s\u001b[39;00m\u001b[39m\"\u001b[39m \u001b[39m%\u001b[39m e\n\u001b[1;32m 188\u001b[0m )\n\u001b[1;32m 190\u001b[0m \u001b[39mreturn\u001b[39;00m conn\n",
|
66 |
+
"\u001b[0;31mNewConnectionError\u001b[0m: <urllib3.connection.HTTPConnection object at 0x103f8fa30>: Failed to establish a new connection: [Errno 61] Connection refused",
|
67 |
+
"\nDuring handling of the above exception, another exception occurred:\n",
|
68 |
+
"\u001b[0;31mMaxRetryError\u001b[0m Traceback (most recent call last)",
|
69 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/requests/adapters.py:489\u001b[0m, in \u001b[0;36mHTTPAdapter.send\u001b[0;34m(self, request, stream, timeout, verify, cert, proxies)\u001b[0m\n\u001b[1;32m 488\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m chunked:\n\u001b[0;32m--> 489\u001b[0m resp \u001b[39m=\u001b[39m conn\u001b[39m.\u001b[39;49murlopen(\n\u001b[1;32m 490\u001b[0m method\u001b[39m=\u001b[39;49mrequest\u001b[39m.\u001b[39;49mmethod,\n\u001b[1;32m 491\u001b[0m url\u001b[39m=\u001b[39;49murl,\n\u001b[1;32m 492\u001b[0m body\u001b[39m=\u001b[39;49mrequest\u001b[39m.\u001b[39;49mbody,\n\u001b[1;32m 493\u001b[0m headers\u001b[39m=\u001b[39;49mrequest\u001b[39m.\u001b[39;49mheaders,\n\u001b[1;32m 494\u001b[0m redirect\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m,\n\u001b[1;32m 495\u001b[0m assert_same_host\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m,\n\u001b[1;32m 496\u001b[0m preload_content\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m,\n\u001b[1;32m 497\u001b[0m decode_content\u001b[39m=\u001b[39;49m\u001b[39mFalse\u001b[39;49;00m,\n\u001b[1;32m 498\u001b[0m retries\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mmax_retries,\n\u001b[1;32m 499\u001b[0m timeout\u001b[39m=\u001b[39;49mtimeout,\n\u001b[1;32m 500\u001b[0m )\n\u001b[1;32m 502\u001b[0m \u001b[39m# Send the request.\u001b[39;00m\n\u001b[1;32m 503\u001b[0m \u001b[39melse\u001b[39;00m:\n",
|
70 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/urllib3/connectionpool.py:787\u001b[0m, in \u001b[0;36mHTTPConnectionPool.urlopen\u001b[0;34m(self, method, url, body, headers, retries, redirect, assert_same_host, timeout, pool_timeout, release_conn, chunked, body_pos, **response_kw)\u001b[0m\n\u001b[1;32m 785\u001b[0m e \u001b[39m=\u001b[39m ProtocolError(\u001b[39m\"\u001b[39m\u001b[39mConnection aborted.\u001b[39m\u001b[39m\"\u001b[39m, e)\n\u001b[0;32m--> 787\u001b[0m retries \u001b[39m=\u001b[39m retries\u001b[39m.\u001b[39;49mincrement(\n\u001b[1;32m 788\u001b[0m method, url, error\u001b[39m=\u001b[39;49me, _pool\u001b[39m=\u001b[39;49m\u001b[39mself\u001b[39;49m, _stacktrace\u001b[39m=\u001b[39;49msys\u001b[39m.\u001b[39;49mexc_info()[\u001b[39m2\u001b[39;49m]\n\u001b[1;32m 789\u001b[0m )\n\u001b[1;32m 790\u001b[0m retries\u001b[39m.\u001b[39msleep()\n",
|
71 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/urllib3/util/retry.py:592\u001b[0m, in \u001b[0;36mRetry.increment\u001b[0;34m(self, method, url, response, error, _pool, _stacktrace)\u001b[0m\n\u001b[1;32m 591\u001b[0m \u001b[39mif\u001b[39;00m new_retry\u001b[39m.\u001b[39mis_exhausted():\n\u001b[0;32m--> 592\u001b[0m \u001b[39mraise\u001b[39;00m MaxRetryError(_pool, url, error \u001b[39mor\u001b[39;00m ResponseError(cause))\n\u001b[1;32m 594\u001b[0m log\u001b[39m.\u001b[39mdebug(\u001b[39m\"\u001b[39m\u001b[39mIncremented Retry for (url=\u001b[39m\u001b[39m'\u001b[39m\u001b[39m%s\u001b[39;00m\u001b[39m'\u001b[39m\u001b[39m): \u001b[39m\u001b[39m%r\u001b[39;00m\u001b[39m\"\u001b[39m, url, new_retry)\n",
|
72 |
+
"\u001b[0;31mMaxRetryError\u001b[0m: HTTPConnectionPool(host='127.0.0.1', port=8080): Max retries exceeded with url: /query (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x103f8fa30>: Failed to establish a new connection: [Errno 61] Connection refused'))",
|
73 |
+
"\nDuring handling of the above exception, another exception occurred:\n",
|
74 |
+
"\u001b[0;31mConnectionError\u001b[0m Traceback (most recent call last)",
|
75 |
+
"Cell \u001b[0;32mIn[3], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mlangchain\u001b[39;00m\u001b[39m.\u001b[39;00m\u001b[39mretrievers\u001b[39;00m \u001b[39mimport\u001b[39;00m ChatGPTPluginRetriever\n\u001b[1;32m 2\u001b[0m retriever \u001b[39m=\u001b[39m ChatGPTPluginRetriever(url\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mhttp://127.0.0.1:8080\u001b[39m\u001b[39m\"\u001b[39m, bearer_token\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mfoo\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m----> 3\u001b[0m retriever\u001b[39m.\u001b[39;49mget_relevant_documents(\u001b[39m\"\u001b[39;49m\u001b[39malice\u001b[39;49m\u001b[39m'\u001b[39;49m\u001b[39ms phone number\u001b[39;49m\u001b[39m\"\u001b[39;49m)\n",
|
76 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/langchain/retrievers/chatgpt_plugin_retriever.py:26\u001b[0m, in \u001b[0;36mChatGPTPluginRetriever.get_relevant_documents\u001b[0;34m(self, query)\u001b[0m\n\u001b[1;32m 24\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mget_relevant_documents\u001b[39m(\u001b[39mself\u001b[39m, query: \u001b[39mstr\u001b[39m) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m List[Document]:\n\u001b[1;32m 25\u001b[0m url, json, headers \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_create_request(query)\n\u001b[0;32m---> 26\u001b[0m response \u001b[39m=\u001b[39m requests\u001b[39m.\u001b[39;49mpost(url, json\u001b[39m=\u001b[39;49mjson, headers\u001b[39m=\u001b[39;49mheaders)\n\u001b[1;32m 27\u001b[0m results \u001b[39m=\u001b[39m response\u001b[39m.\u001b[39mjson()[\u001b[39m\"\u001b[39m\u001b[39mresults\u001b[39m\u001b[39m\"\u001b[39m][\u001b[39m0\u001b[39m][\u001b[39m\"\u001b[39m\u001b[39mresults\u001b[39m\u001b[39m\"\u001b[39m]\n\u001b[1;32m 28\u001b[0m docs \u001b[39m=\u001b[39m []\n",
|
77 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/requests/api.py:115\u001b[0m, in \u001b[0;36mpost\u001b[0;34m(url, data, json, **kwargs)\u001b[0m\n\u001b[1;32m 103\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mpost\u001b[39m(url, data\u001b[39m=\u001b[39m\u001b[39mNone\u001b[39;00m, json\u001b[39m=\u001b[39m\u001b[39mNone\u001b[39;00m, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs):\n\u001b[1;32m 104\u001b[0m \u001b[39m \u001b[39m\u001b[39mr\u001b[39m\u001b[39m\"\"\"Sends a POST request.\u001b[39;00m\n\u001b[1;32m 105\u001b[0m \n\u001b[1;32m 106\u001b[0m \u001b[39m :param url: URL for the new :class:`Request` object.\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[39m :rtype: requests.Response\u001b[39;00m\n\u001b[1;32m 113\u001b[0m \u001b[39m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 115\u001b[0m \u001b[39mreturn\u001b[39;00m request(\u001b[39m\"\u001b[39;49m\u001b[39mpost\u001b[39;49m\u001b[39m\"\u001b[39;49m, url, data\u001b[39m=\u001b[39;49mdata, json\u001b[39m=\u001b[39;49mjson, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n",
|
78 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/requests/api.py:59\u001b[0m, in \u001b[0;36mrequest\u001b[0;34m(method, url, **kwargs)\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[39m# By using the 'with' statement we are sure the session is closed, thus we\u001b[39;00m\n\u001b[1;32m 56\u001b[0m \u001b[39m# avoid leaving sockets open which can trigger a ResourceWarning in some\u001b[39;00m\n\u001b[1;32m 57\u001b[0m \u001b[39m# cases, and look like a memory leak in others.\u001b[39;00m\n\u001b[1;32m 58\u001b[0m \u001b[39mwith\u001b[39;00m sessions\u001b[39m.\u001b[39mSession() \u001b[39mas\u001b[39;00m session:\n\u001b[0;32m---> 59\u001b[0m \u001b[39mreturn\u001b[39;00m session\u001b[39m.\u001b[39;49mrequest(method\u001b[39m=\u001b[39;49mmethod, url\u001b[39m=\u001b[39;49murl, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n",
|
79 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/requests/sessions.py:587\u001b[0m, in \u001b[0;36mSession.request\u001b[0;34m(self, method, url, params, data, headers, cookies, files, auth, timeout, allow_redirects, proxies, hooks, stream, verify, cert, json)\u001b[0m\n\u001b[1;32m 582\u001b[0m send_kwargs \u001b[39m=\u001b[39m {\n\u001b[1;32m 583\u001b[0m \u001b[39m\"\u001b[39m\u001b[39mtimeout\u001b[39m\u001b[39m\"\u001b[39m: timeout,\n\u001b[1;32m 584\u001b[0m \u001b[39m\"\u001b[39m\u001b[39mallow_redirects\u001b[39m\u001b[39m\"\u001b[39m: allow_redirects,\n\u001b[1;32m 585\u001b[0m }\n\u001b[1;32m 586\u001b[0m send_kwargs\u001b[39m.\u001b[39mupdate(settings)\n\u001b[0;32m--> 587\u001b[0m resp \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49msend(prep, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49msend_kwargs)\n\u001b[1;32m 589\u001b[0m \u001b[39mreturn\u001b[39;00m resp\n",
|
80 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/requests/sessions.py:701\u001b[0m, in \u001b[0;36mSession.send\u001b[0;34m(self, request, **kwargs)\u001b[0m\n\u001b[1;32m 698\u001b[0m start \u001b[39m=\u001b[39m preferred_clock()\n\u001b[1;32m 700\u001b[0m \u001b[39m# Send the request\u001b[39;00m\n\u001b[0;32m--> 701\u001b[0m r \u001b[39m=\u001b[39m adapter\u001b[39m.\u001b[39;49msend(request, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m 703\u001b[0m \u001b[39m# Total elapsed time of the request (approximately)\u001b[39;00m\n\u001b[1;32m 704\u001b[0m elapsed \u001b[39m=\u001b[39m preferred_clock() \u001b[39m-\u001b[39m start\n",
|
81 |
+
"File \u001b[0;32m~/anaconda3/lib/python3.10/site-packages/requests/adapters.py:565\u001b[0m, in \u001b[0;36mHTTPAdapter.send\u001b[0;34m(self, request, stream, timeout, verify, cert, proxies)\u001b[0m\n\u001b[1;32m 561\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39misinstance\u001b[39m(e\u001b[39m.\u001b[39mreason, _SSLError):\n\u001b[1;32m 562\u001b[0m \u001b[39m# This branch is for urllib3 v1.22 and later.\u001b[39;00m\n\u001b[1;32m 563\u001b[0m \u001b[39mraise\u001b[39;00m SSLError(e, request\u001b[39m=\u001b[39mrequest)\n\u001b[0;32m--> 565\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mConnectionError\u001b[39;00m(e, request\u001b[39m=\u001b[39mrequest)\n\u001b[1;32m 567\u001b[0m \u001b[39mexcept\u001b[39;00m ClosedPoolError \u001b[39mas\u001b[39;00m e:\n\u001b[1;32m 568\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mConnectionError\u001b[39;00m(e, request\u001b[39m=\u001b[39mrequest)\n",
|
82 |
+
"\u001b[0;31mConnectionError\u001b[0m: HTTPConnectionPool(host='127.0.0.1', port=8080): Max retries exceeded with url: /query (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x103f8fa30>: Failed to establish a new connection: [Errno 61] Connection refused'))"
|
83 |
+
]
|
84 |
+
}
|
85 |
+
],
|
86 |
+
"source": [
|
87 |
+
"from langchain.retrievers import ChatGPTPluginRetriever\n",
|
88 |
+
"retriever = ChatGPTPluginRetriever(url=\"http://127.0.0.1:8080\", bearer_token=\"foo\")\n",
|
89 |
+
"retriever.get_relevant_documents(\"alice's phone number\")"
|
90 |
+
]
|
91 |
+
}
|
92 |
+
],
|
93 |
+
"metadata": {
|
94 |
+
"kernelspec": {
|
95 |
+
"display_name": "base",
|
96 |
+
"language": "python",
|
97 |
+
"name": "python3"
|
98 |
+
},
|
99 |
+
"language_info": {
|
100 |
+
"codemirror_mode": {
|
101 |
+
"name": "ipython",
|
102 |
+
"version": 3
|
103 |
+
},
|
104 |
+
"file_extension": ".py",
|
105 |
+
"mimetype": "text/x-python",
|
106 |
+
"name": "python",
|
107 |
+
"nbconvert_exporter": "python",
|
108 |
+
"pygments_lexer": "ipython3",
|
109 |
+
"version": "3.10.10"
|
110 |
+
},
|
111 |
+
"orig_nbformat": 4
|
112 |
+
},
|
113 |
+
"nbformat": 4,
|
114 |
+
"nbformat_minor": 2
|
115 |
+
}
|