Lookimi Hazzzardous committed on
Commit
88426c0
0 Parent(s):

Duplicate from Hazzzardous/RWKV-Instruct


Co-authored-by: Harrison Vanderbyl <[email protected]>

Files changed (6)
  1. .gitattributes +35 -0
  2. 14B-8K-rwkvstic-2-1-2.rwkv +3 -0
  3. README.md +17 -0
  4. app.py +297 -0
  5. config.py +60 -0
  6. requirements.txt +7 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ 14B-8K-rwkvstic-2-1-2.rwkv filter=lfs diff=lfs merge=lfs -text
14B-8K-rwkvstic-2-1-2.rwkv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9863c8993a323971167f9625f6c37583bc5f78bcc8fe85ecbdf49ccb9b3b2f78
+ size 14455104393
README.md ADDED
@@ -0,0 +1,17 @@
+ ---
+ title: RWKV Instruct
+ emoji: 💩
+ colorFrom: yellow
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 3.17.0
+ app_file: app.py
+ pinned: false
+ license: gpl-3.0
+ duplicated_from: Hazzzardous/RWKV-Instruct
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+ Based on: https://huggingface.co/spaces/yahma/rwkv
app.py ADDED
@@ -0,0 +1,297 @@
+ """
+ RWKV RNN Model - Gradio Space for HuggingFace
+ YT - Mean Gene Hacks - https://www.youtube.com/@MeanGeneHacks
+ (C) Gene Ruebsamen - 2/7/2023
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ """
+
+ import gradio as gr
+ import codecs
+ from ast import literal_eval
+ from datetime import datetime
+ from rwkvstic.load import RWKV
+ from config import config, title
+ import torch
+ import gc
+
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+
+ desc = '''<p>RNN with Transformer-level LLM Performance (<a href='https://github.com/BlinkDL/RWKV-LM'>github</a>).
+ According to the author: "It combines the best of RNN and transformers - great performance, fast inference, saves VRAM, fast training, "infinite" ctx_len, and free sentence embedding."'''
+
+ thanks = '''<p>Thanks to <a href='https://github.com/gururise/rwkv_gradio'>Gururise</a> for this template</p>'''
+
+
+ def to_md(text):
+     return text.replace("\n", "<br />")
+
+
+ def get_model():
+     return RWKV(**config)
+
+
+ model = get_model()
+
+
+ def infer(
+     prompt,
+     mode="generative",
+     max_new_tokens=10,
+     temperature=0.1,
+     top_p=1.0,
+     stop="<|endoftext|>",
+     end_adj=0.0,
+     seed=42,
+ ):
+     global model
+
+     if model is None:
+         gc.collect()
+         if DEVICE == "cuda":
+             torch.cuda.empty_cache()
+         model = get_model()
+
+     max_new_tokens = int(max_new_tokens)
+     temperature = float(temperature)
+     end_adj = float(end_adj)
+     top_p = float(top_p)
+     stop = [x.strip(' ') for x in stop.split(',')]
+
+     assert 1 <= max_new_tokens <= 512
+     assert 0.0 <= temperature <= 5.0
+     assert 0.0 <= top_p <= 1.0
+
+     temperature = max(0.05, temperature)
+     if prompt == "":
+         prompt = " "
+
+     # Clear model state for generative mode, then apply the mode's prompt template
+     model.resetState()
+     if mode == "Q/A":
+         prompt = f"\nQ: {prompt}\n\nA:"
+     elif mode == "ELDR":
+         prompt = f"\n{prompt}\n\nExpert Long Detailed Response:\n\nHi, thanks for reaching out, we would be happy to answer your question"
+     elif mode == "Expert":
+         prompt = f"\n{prompt}\n\nExpert Full Response:\n\nHi, thanks for reaching out, we would be happy to answer your question.\n"
+     elif mode == "EFA":
+         prompt = f'\nAsk Expert\n\nQuestion:\n{prompt}\n\nExpert Full Answer:\n'
+     elif mode == "BFR":
+         prompt = f"Task given:\n\n{prompt}\n\nBest Full Response:"
+
+     print(f"PROMPT ({datetime.now()}):\n-------\n{prompt}")
+     print(f"OUTPUT ({datetime.now()}):\n-------\n")
+     # Feed the prompt into the model's context
+     model.loadContext(newctx=prompt)
+     generated_text = ""
+     done = False
+     with torch.no_grad():
+         for _ in range(max_new_tokens):
+             char = model.forward(stopStrings=stop, temp=temperature,
+                                  top_p_usual=top_p, end_adj=end_adj)["output"]
+             print(char, end='', flush=True)
+             generated_text += char
+             generated_text = generated_text.lstrip("\n ")
+
+             # Stop early once any stop string appears in the output
+             for stop_word in stop:
+                 stop_word = codecs.getdecoder("unicode_escape")(stop_word)[0]
+                 if stop_word != '' and stop_word in generated_text:
+                     done = True
+                     break
+             yield generated_text
+             if done:
+                 print("<stopped>\n")
+                 break
+
+     # Trim the final text at the first stop string
+     for stop_word in stop:
+         stop_word = codecs.getdecoder("unicode_escape")(stop_word)[0]
+         if stop_word != '' and stop_word in generated_text:
+             generated_text = generated_text[:generated_text.find(stop_word)]
+
+     gc.collect()
+     yield generated_text
+
+
+ username = "USER"
+ intro = f'''The following is a verbose and detailed conversation between an AI assistant called FRITZ, and a human user called USER. FRITZ is intelligent, knowledgeable, wise and polite.
+
+ {username}: What year was the french revolution?
+ FRITZ: The French Revolution started in 1789, and lasted 10 years until 1799.
+ {username}: 3+5=?
+ FRITZ: The answer is 8.
+ {username}: What year did the Berlin Wall fall?
+ FRITZ: The Berlin wall stood for 28 years and fell in 1989.
+ {username}: solve for a: 9-a=2
+ FRITZ: The answer is a=7, because 9-7 = 2.
+ {username}: wat is lhc
+ FRITZ: The Large Hadron Collider (LHC) is a high-energy particle collider, built by CERN, and completed in 2008. It was used to confirm the existence of the Higgs boson in 2012.
+ {username}: Tell me about yourself.
+ FRITZ: My name is Fritz. I am an RNN based Large Language Model (LLM).
+ '''
+
+ # Prime the model with the chat intro once and snapshot the resulting state,
+ # so every chat session can start from this baseline without re-processing it.
+ model.resetState()
+ model.loadContext(newctx=intro)
+ chatState = model.getState()
+ model.resetState()
+
+
+ def chat(
+     prompt,
+     history,
+     max_new_tokens=10,
+     temperature=0.1,
+     top_p=1.0,
+     seed=42,
+ ):
+     global model
+     global username
+     history = history or []
+
+     if model is None:
+         gc.collect()
+         if DEVICE == "cuda":
+             torch.cuda.empty_cache()
+         model = get_model()
+
+     username = username.strip() or "USER"
+
+     if len(history) == 0:
+         # No history, so reset to the saved chat state
+         model.setState(chatState)
+         history = [[], model.emptyState]
+         print("reset chat state")
+     else:
+         if history[0][0][0].split(':')[0] != username:
+             # Username changed: restart from a copy of the saved chat state
+             model.setState((chatState[0], chatState[1].clone()))
+             history = [[], chatState]
+             print("username changed, reset state")
+         else:
+             # Restore this session's state (clone so the stored copy is not mutated)
+             model.setState((history[1][0], history[1][1].clone()))
+
+     max_new_tokens = int(max_new_tokens)
+     temperature = float(temperature)
+     top_p = float(top_p)
+
+     assert 1 <= max_new_tokens <= 512
+     assert 0.0 <= temperature <= 3.0
+     assert 0.0 <= top_p <= 1.0
+
+     temperature = max(0.05, temperature)
+
+     prompt = f"{username}: " + prompt + "\n"
+     print(f"CHAT ({datetime.now()}):\n-------\n{prompt}")
+     print(f"OUTPUT ({datetime.now()}):\n-------\n")
+
+     # Feed the user's turn into the model's context
+     model.loadContext(newctx=prompt)
+
+     out = model.forward(number=max_new_tokens, stopStrings=[
+         "<|endoftext|>", username + ":"], temp=temperature, top_p_usual=top_p)
+
+     generated_text = out["output"].lstrip("\n ")
+     # Drop a trailing "<username>:" echo if generation halted on that stop string
+     if generated_text.endswith(username + ":"):
+         generated_text = generated_text[:-len(username) - 1]
+     print(f"{generated_text}")
+
+     gc.collect()
+     history[0].append((prompt, generated_text))
+     return history[0], [history[0], out["state"]]
+
+
+ examples = [
+     [
+         # Question Answering
+         '''What is the capital of Germany?''', "Q/A", 25, 0.2, 1.0, "<|endoftext|>"],
+     [
+         # Question Answering
+         '''Are humans good or bad?''', "Q/A", 150, 0.8, 0.8, "<|endoftext|>"],
+     [
+         # Question Answering
+         '''What is the purpose of Vitamin A?''', "Q/A", 50, 0.2, 0.8, "<|endoftext|>"],
+     [
+         # Chatbot
+         '''This is a conversation between two AI large language models named Alex and Fritz. They are exploring each other's capabilities, and trying to ask interesting questions of one another to explore the limits of each other's AI.
+
+ Conversation:
+ Alex: Good morning, Fritz, what type of LLM are you based upon?
+ Fritz: Morning Alex, I am an RNN with transformer level performance. My language model is 100% attention free.
+ Alex:''', "generative", 220, 0.9, 0.9, "\\n\\n,<|endoftext|>"],
+     [
+         # Short story generation
+         '''Task given:
+
+ Please Write a Short story about a cat learning python
+
+ Best Full Response:
+ ''', "generative", 140, 0.85, 0.8, "<|endoftext|>"],
+     [
+         # Natural Language Interface
+         '''Here is a short story (in the style of Tolkien) in which Aiden attacks a robot with a sword:
+ ''', "generative", 140, 0.85, 0.8, "<|endoftext|>"]
+ ]
+
+
+ iface = gr.Interface(
+     fn=infer,
+     description=f'''<h3>Generative and Question/Answer</h3>{desc}{thanks}''',
+     allow_flagging="never",
+     inputs=[
+         gr.Textbox(lines=20, label="Prompt"),  # prompt
+         # Choices must match the mode strings handled in infer()
+         gr.Radio(["generative", "Q/A", "ELDR", "Expert", "EFA", "BFR"],
+                  value="Expert", label="Choose Mode"),
+         gr.Slider(1, 512, value=40),  # max_tokens
+         gr.Slider(0.0, 5.0, value=0.9),  # temperature
+         gr.Slider(0.0, 1.0, value=0.85),  # top_p
+         gr.Textbox(lines=1, value="<|endoftext|>"),  # stop
+         gr.Slider(-999, 0.0, value=0.0),  # end_adj
+     ],
+     outputs=gr.Textbox(label="Generated Output", lines=25),
+     examples=examples,
+     cache_examples=False,
+ ).queue()
+
+ chatiface = gr.Interface(
+     fn=chat,
+     description=f'''<h3>Chatbot</h3><h4>Refresh page or change name to reset memory context</h4>{desc}{thanks}''',
+     allow_flagging="never",
+     inputs=[
+         gr.Textbox(lines=5, label="Message"),  # prompt
+         "state",
+         gr.Slider(1, 256, value=60),  # max_tokens
+         gr.Slider(0.0, 1.0, value=0.8),  # temperature
+         gr.Slider(0.0, 1.0, value=0.85)  # top_p
+     ],
+     outputs=[gr.Chatbot(label="Chat Log", color_map=("green", "pink")), "state"],
+ ).queue()
+
+ demo = gr.TabbedInterface(
+     [iface, chatiface], ["Q/A", "Chatbot"],
+     title=title,
+ )
+
+ demo.queue()
+ demo.launch(share=False)
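
For context on the state handling above: `chat()` snapshots the model state after priming with the intro, then restores a cloned copy on each turn so one session's generation does not mutate the saved baseline. Below is a minimal sketch of that same pattern, using only the rwkvstic calls already present in app.py; the prompt text and token count are illustrative, not part of this commit.

```python
# Minimal sketch of app.py's save/restore state pattern (assumes the same
# rwkvstic API: loadContext/forward/getState/setState/resetState).
from rwkvstic.load import RWKV

model = RWKV(path="14B-8K-rwkvstic-2-1-2.rwkv")

# Prime once with a shared intro and snapshot the resulting state.
model.resetState()
model.loadContext(newctx="The following is a conversation with FRITZ.\n")
baseline = model.getState()

def one_turn(session_state, user_line):
    # Restore the session's state; clone the tensor half so the stored
    # copy is not mutated by this generation.
    model.setState((session_state[0], session_state[1].clone()))
    model.loadContext(newctx=f"USER: {user_line}\n")
    out = model.forward(number=60, stopStrings=["USER:"])
    return out["output"], out["state"]

reply, state = one_turn(baseline, "What year was the french revolution?")
```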
config.py ADDED
@@ -0,0 +1,60 @@
+ from rwkvstic.agnostic.backends import TORCH, TORCH_QUANT
+ import torch
+
+ quantized = {
+     "mode": TORCH_QUANT,
+     "runtimedtype": torch.bfloat16,
+     "useGPU": torch.cuda.is_available(),
+     "chunksize": 32,  # larger = more accurate, but uses more memory
+     "target": 100  # your GPU's max size; excess VRAM is offloaded to CPU
+ }
+
+ # UNCOMMENT TO SELECT OPTIONS
+ # Not a full list of options; see https://pypi.org/project/rwkvstic/ and https://huggingface.co/BlinkDL/ for more models/modes
+
+ # RWKV 14B 8k-context model (local file, ~14.5GB)
+
+ config = {
+     "path": "14B-8K-rwkvstic-2-1-2.rwkv"
+     # "useLogFix": False  # when enabled, use BlinkDL's version of the attention
+ }
+
+ title = "RWKV-4 (14B 8k)"
+
+ # RWKV 1B5 instruct model quantized
+ # Approximate
+ # [Vram usage: 1.3GB]
+ # [File size: 3.0GB]
+
+ # config = {
+ #     "path": "https://huggingface.co/BlinkDL/rwkv-4-pile-1b5/resolve/main/RWKV-4-Pile-1B5-Instruct-test1-20230124.pth",
+ #     **quantized
+ # }
+
+ # title = "RWKV-4 (1.5b Instruct Quantized)"
+
+ # RWKV 7B instruct pre-quantized (settings baked into model)
+ # Approximate
+ # [Vram usage: 7.0GB]
+ # [File size: 8.0GB]
+
+ # config = {
+ #     "path": "https://huggingface.co/Hazzzardous/RWKV-8Bit/resolve/main/RWKV-4-Pile-7B-Instruct.pqth"
+ # }
+
+ # title = "RWKV-4 (7b Instruct Quantized)"
+
+ # RWKV 14B quantized (latest as of Feb 9)
+ # Approximate
+ # [Vram usage: 15.0GB]
+ # [File size: 15.0GB]
+
+ # config = {
+ #     "path": "https://huggingface.co/Hazzzardous/RWKV-8Bit/resolve/main/RWKV-4-Pile-14B-20230204-7324.pqth"
+ # }
+
+ # title = "RWKV-4 (14b 94% trained, not yet instruct tuned, 8-Bit)"
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ transformers
+ torch
+ inquirer
+ rwkvstic>=2.1.2
+ scipy
+ onnx