lizhen committed
Commit 85a770f · 1 Parent(s): 4bdd4c5

add streaming

Files changed (2):
  1. llms_streaming.ipynb +186 -0
  2. openai_simple.py +4 -1
llms_streaming.ipynb ADDED
@@ -0,0 +1,186 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.llms import OpenAI, Anthropic\n",
+ "from langchain.chat_models import ChatOpenAI\n",
+ "from langchain.callbacks.base import CallbackManager\n",
+ "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n",
+ "from langchain.schema import HumanMessage\n",
+ "from langchain.callbacks.base import BaseCallbackHandler\n",
+ "import sys\n",
+ "from typing import Any, Dict, List, Union\n",
+ "from langchain.schema import AgentAction, AgentFinish, LLMResult"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "class PrintCallbackHandler(BaseCallbackHandler):\n",
+ "    \"\"\"Callback handler for streaming. Only works with LLMs that support streaming.\"\"\"\n",
+ "\n",
+ "    def on_llm_start(\n",
+ "        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
+ "    ) -> None:\n",
+ "        \"\"\"Run when LLM starts running.\"\"\"\n",
+ "        print(\"on_llm_start\")\n",
+ "\n",
+ "    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:\n",
+ "        \"\"\"Run on new LLM token. Only available when streaming is enabled.\"\"\"\n",
+ "        sys.stdout.write(token)\n",
+ "        sys.stdout.flush()\n",
+ "\n",
+ "    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:\n",
+ "        \"\"\"Run when LLM ends running.\"\"\"\n",
+ "        print(\"\\non_llm_end\")\n",
+ "\n",
+ "    def on_llm_error(\n",
+ "        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
+ "    ) -> None:\n",
+ "        \"\"\"Run when LLM errors.\"\"\"\n",
+ "\n",
+ "    def on_chain_start(\n",
+ "        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any\n",
+ "    ) -> None:\n",
+ "        \"\"\"Run when chain starts running.\"\"\"\n",
+ "        print(\"on_chain_start\")\n",
+ "\n",
+ "    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:\n",
+ "        \"\"\"Run when chain ends running.\"\"\"\n",
+ "        print(\"on_chain_end\")\n",
+ "\n",
+ "    def on_chain_error(\n",
+ "        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
+ "    ) -> None:\n",
+ "        \"\"\"Run when chain errors.\"\"\"\n",
+ "\n",
+ "    def on_tool_start(\n",
+ "        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any\n",
+ "    ) -> None:\n",
+ "        \"\"\"Run when tool starts running.\"\"\"\n",
+ "        print(\"on_tool_start\")\n",
+ "\n",
+ "    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:\n",
+ "        \"\"\"Run on agent action.\"\"\"\n",
+ "        pass\n",
+ "\n",
+ "    def on_tool_end(self, output: str, **kwargs: Any) -> None:\n",
+ "        \"\"\"Run when tool ends running.\"\"\"\n",
+ "\n",
+ "    def on_tool_error(\n",
+ "        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any\n",
+ "    ) -> None:\n",
+ "        \"\"\"Run when tool errors.\"\"\"\n",
+ "\n",
+ "    def on_text(self, text: str, **kwargs: Any) -> None:\n",
+ "        \"\"\"Run on arbitrary text.\"\"\"\n",
+ "        print(\"on_text\")\n",
+ "\n",
+ "    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:\n",
+ "        \"\"\"Run on agent end.\"\"\"\n",
+ "        print(\"on_agent_finish\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "on_llm_start\n",
+ "\n",
+ "\n",
+ "每当我想起你 \n",
+ "心里总是满满的温柔 \n",
+ "每当我想起你 \n",
+ "总是有一种特别的感觉 \n",
+ "\n",
+ "每当我想起你 \n",
+ "总是有一种深深的思念 \n",
+ "每当我想起你 \n",
+ "总是有一种温暖的感觉 \n",
+ "\n",
+ "每当我想起你 \n",
+ "总是有一种温柔的爱 \n",
+ "每当我想起你 \n",
+ "总是有一种深深的情感 \n",
+ "\n",
+ "每当我想起你 \n",
+ "总是有一种温暖的温柔 \n",
+ "每当我想起你 \n",
+ "总是有一种特别的感动\n",
+ "on_llm_end\n"
+ ]
+ }
+ ],
+ "source": [
+ "# or use StreamingStdOutCallbackHandler\n",
+ "llm = OpenAI(max_tokens=1024, streaming=True, callback_manager=CallbackManager([PrintCallbackHandler()]), verbose=True, temperature=0)\n",
+ "resp = llm(\"仿写一份周杰伦的歌词\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "on_llm_start\n",
+ "\n",
+ "\n",
+ "Q: What did the fish say when it hit the wall?\n",
+ "A: Dam!on_llm_end\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "LLMResult(generations=[[Generation(text='\\n\\nQ: What did the fish say when it hit the wall?\\nA: Dam!', generation_info={'finish_reason': 'stop', 'logprobs': None})]], llm_output={'token_usage': {}, 'model_name': 'text-davinci-003'})"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "llm.generate([\"Tell me a joke.\"])"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.3"
+ },
+ "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
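
For reference: the prompt in the streaming cell, "仿写一份周杰伦的歌词", asks the model to write lyrics in the style of Jay Chou, and the streamed stdout above is the resulting Chinese lyrics, bracketed by the on_llm_start / on_llm_end prints from the custom handler. As the comment in that cell notes, the custom PrintCallbackHandler can be swapped for the built-in StreamingStdOutCallbackHandler that the first cell already imports. A minimal sketch, assuming the same langchain version as the notebook (where callbacks are wired in through callback_manager at construction time):

    from langchain.llms import OpenAI
    from langchain.callbacks.base import CallbackManager
    from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

    # streaming=True is required: on_llm_new_token only fires while tokens stream.
    llm = OpenAI(
        max_tokens=1024,
        streaming=True,
        callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
        temperature=0,
    )
    resp = llm("Tell me a joke.")  # tokens print as they arrive; resp still holds the full text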
openai_simple.py CHANGED
@@ -1,12 +1,15 @@
  #!/usr/bin/python3
 
  from langchain.llms import OpenAI
+ from langchain.callbacks.base import CallbackManager
+ from langchain.callbacks.openai_info import OpenAICallbackHandler
+ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
  import langchain
  import os
 
  print(langchain.__version__)
 
- llm = OpenAI(temperature=0.9)
+ llm = OpenAI(temperature=0.9, callback_manager=CallbackManager([OpenAICallbackHandler()]))
 
  text = "Hi, Could you help me choose a Chinese name? For example"
  print(llm(text))
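
Unlike the streaming handler in the notebook, the OpenAICallbackHandler wired in here accumulates token usage and cost across OpenAI calls. A minimal sketch of the same bookkeeping via langchain's get_openai_callback context manager (assuming a langchain version that ships this helper; total_tokens and total_cost are counters the handler maintains):

    from langchain.llms import OpenAI
    from langchain.callbacks import get_openai_callback

    llm = OpenAI(temperature=0.9)
    with get_openai_callback() as cb:
        print(llm("Hi, Could you help me choose a Chinese name? For example"))
    # cb has accumulated usage for every OpenAI call made inside the block.
    print(cb.total_tokens, cb.total_cost)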