lonestar108 committed on
Commit
75e0c95
1 Parent(s): 9daa073
.gitignore CHANGED
@@ -6,7 +6,6 @@ extensions/sd_api_pictures/outputs
  extensions/multimodal/pipelines
  logs
  loras
- models
  repositories
  softprompts
  torch-dumps
models/PygmalionAI_pygmalion-350m/README.md ADDED
@@ -0,0 +1,24 @@
+ ---
+ language:
+ - en
+ thumbnail:
+ tags:
+ - convAI
+ - conversational
+ inference: false
+ ---
+ # pygmalion-350m
+
+ # Model description
+
+ This is a proof-of-concept fine-tune of Facebook's OPT-350M model optimized for dialogue, to be used as a stepping stone to higher-parameter models.
+
+ **Disclaimer:** NSFW data was included in the fine-tuning of this model. Although SFW inputs will usually result in SFW outputs, you are advised to **chat at your own risk. This model is not suitable for use by minors.**
+
+ # Fine-tuning process
+
+ This model was much easier than expected to create.
+
+ We used the [ColossalAI](https://www.colossalai.org/) library to fine-tune the [OPT-350M](https://huggingface.co/facebook/opt-350m) model originally trained by Facebook on The Pile. Though our initial dataset consisted of dialogue gathered from various sources, totaling about 50 MB, early training runs revealed that the model converged after only 7% of the dataset was passed through. To alleviate this, we massively reduced the size of the dataset to only 273 KB.
+
+ ColossalAI's magic allowed for something incredible: this entire model was fine-tuned on a single GPU with only 6 GB ***(!)*** of VRAM. Fine-tuning took less than an hour to complete.
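As a hedged illustration (not part of the original model card), a checkpoint laid out like the files added below can typically be loaded with the Hugging Face Transformers library; the local path is an assumption based on this commit's directory layout.

```python
# Minimal sketch: load the OPT-350M fine-tune for inference with transformers.
# The directory name is an assumption taken from this commit; adjust as needed.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_dir = "models/PygmalionAI_pygmalion-350m"
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForCausalLM.from_pretrained(model_dir)

inputs = tokenizer("Hello! How are you today?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```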
models/PygmalionAI_pygmalion-350m/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "_name_or_path": "opt-350m",
+ "activation_dropout": 0.0,
+ "activation_function": "relu",
+ "architectures": [
+ "OPTForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 2,
+ "do_layer_norm_before": false,
+ "dropout": 0.1,
+ "eos_token_id": 2,
+ "ffn_dim": 4096,
+ "hidden_size": 1024,
+ "init_std": 0.02,
+ "layerdrop": 0.0,
+ "max_position_embeddings": 2048,
+ "model_type": "opt",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "pad_token_id": 1,
+ "prefix": "</s>",
+ "torch_dtype": "float16",
+ "transformers_version": "4.20.0.dev0",
+ "use_cache": true,
+ "vocab_size": 50272,
+ "word_embed_proj_dim": 512
+ }
models/PygmalionAI_pygmalion-350m/huggingface-metadata.txt ADDED
@@ -0,0 +1,5 @@
+ url: https://huggingface.co/PygmalionAI/pygmalion-350m
+ branch: main
+ download date: 2023-09-08 12:27:24
+ sha256sum:
+ 356aa4ab61193d13e3e7a097bb5f2c025dc2536d5f127154889202ba3c735ae2 pytorch_model.bin
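The recorded SHA-256 value can be checked against the downloaded weights. Here is a minimal sketch using only the Python standard library; the file path is an assumption based on this commit's layout.

```python
# Verify pytorch_model.bin against the sha256 listed in huggingface-metadata.txt.
import hashlib

expected = "356aa4ab61193d13e3e7a097bb5f2c025dc2536d5f127154889202ba3c735ae2"

h = hashlib.sha256()
with open("models/PygmalionAI_pygmalion-350m/pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

print("OK" if h.hexdigest() == expected else "checksum mismatch")
```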
models/PygmalionAI_pygmalion-350m/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
models/PygmalionAI_pygmalion-350m/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:356aa4ab61193d13e3e7a097bb5f2c025dc2536d5f127154889202ba3c735ae2
+ size 1324917213
models/PygmalionAI_pygmalion-350m/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}}
models/PygmalionAI_pygmalion-350m/tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"errors": "replace", "unk_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "add_bos_token": true, "special_tokens_map_file": null, "name_or_path": "patrickvonplaten/opt-30b"}
models/PygmalionAI_pygmalion-350m/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ/README.md ADDED
@@ -0,0 +1,325 @@
1
+ ---
2
+ inference: false
3
+ license: other
4
+ ---
5
+
6
+ <!-- header start -->
7
+ <!-- 200823 -->
8
+ <div style="width: auto; margin-left: auto; margin-right: auto">
9
+ <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;">
10
+ </div>
11
+ <div style="display: flex; justify-content: space-between; width: 100%;">
12
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
13
+ <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p>
14
+ </div>
15
+ <div style="display: flex; flex-direction: column; align-items: flex-end;">
16
+ <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p>
17
+ </div>
18
+ </div>
19
+ <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div>
20
+ <hr style="margin-top: 1.0em; margin-bottom: 1.0em;">
21
+ <!-- header end -->
22
+
23
+ # TehVenom's merge of PygmalionAI's Pygmalion 13B GPTQ
24
+
25
+ These files are GPTQ 4bit model files for [TehVenom's merge of PygmalionAI's Pygmalion 13B](https://huggingface.co/TehVenom/Pygmalion-13b-Merged) merged with [Kaio Ken's SuperHOT 8K](https://huggingface.co/kaiokendev/superhot-13b-8k-no-rlhf-test).
26
+
27
+ It is the result of quantising to 4bit using [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa).
28
+
29
+ **This is an experimental new GPTQ which offers up to 8K context size**
30
+
31
+ The increased context is tested to work with [ExLlama](https://github.com/turboderp/exllama), via the latest release of [text-generation-webui](https://github.com/oobabooga/text-generation-webui).
32
+
33
+ It has also been tested from Python code using AutoGPTQ, and `trust_remote_code=True`.
34
+
35
+ Code credits:
36
+ - Original concept and code for increasing context length: [kaiokendev](https://huggingface.co/kaiokendev)
37
+ - Updated Llama modelling code that includes this automatically via trust_remote_code: [emozilla](https://huggingface.co/emozilla).
38
+
39
+ Please read carefully below to see how to use it.
40
+
41
+ GGML versions are not yet provided, as there is not yet support for SuperHOT in llama.cpp. This is being investigated and will hopefully come soon.
42
+
43
+ ## Repositories available
44
+
45
+ * [4-bit GPTQ models for GPU inference](https://huggingface.co/TheBloke/Pygmalion-13B-SuperHOT-8K-GPTQ)
46
+ * [2, 3, 4, 5, 6 and 8-bit GGML models for CPU inference](https://huggingface.co/TheBloke/Pygmalion-13B-SuperHOT-8K-GGML)
47
+ * [Unquantised SuperHOT fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/TheBloke/Pygmalion-13B-SuperHOT-8K-fp16)
48
+ * [Unquantised base fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/PygmalionAI/pygmalion-13b)
49
+
50
+ ## How to easily download and use this model in text-generation-webui with ExLlama
51
+
52
+ Please make sure you're using the latest version of text-generation-webui
53
+
54
+ 1. Click the **Model tab**.
55
+ 2. Under **Download custom model or LoRA**, enter `TheBloke/Pygmalion-13B-SuperHOT-8K-GPTQ`.
56
+ 3. Click **Download**.
57
+ 4. The model will start downloading. Once it's finished it will say "Done"
58
+ 5. Untick **Autoload the model**
59
+ 6. In the top left, click the refresh icon next to **Model**.
60
+ 7. In the **Model** dropdown, choose the model you just downloaded: `Pygmalion-13B-SuperHOT-8K-GPTQ`
61
+ 8. To use the increased context, set the **Loader** to **ExLlama**, set **max_seq_len** to 8192 or 4096, and set **compress_pos_emb** to **4** for 8192 context, or to **2** for 4096 context.
62
+ 9. Now click **Save Settings** followed by **Reload**
63
+ 10. The model will automatically load, and is now ready for use!
64
+ 11. Once you're ready, click the **Text Generation tab** and enter a prompt to get started!
65
+
66
+ ## How to use this GPTQ model from Python code with AutoGPTQ
67
+
68
+ First make sure you have AutoGPTQ and Einops installed:
69
+
70
+ ```
71
+ pip3 install einops auto-gptq
72
+ ```
73
+
74
+ Then run the following code. Note that in order to get this to work, `config.json` has been hardcoded to a sequence length of 8192.
75
+
76
+ If you want to try 4096 instead to reduce VRAM usage, please manually edit `config.json` to set `max_position_embeddings` to the value you want.
77
+
78
+ ```python
79
+ from transformers import AutoTokenizer, pipeline, logging
80
+ from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
81
+ import argparse
82
+
83
+ model_name_or_path = "TheBloke/Pygmalion-13B-SuperHOT-8K-GPTQ"
84
+ model_basename = "pygmalion-13b-superhot-8k-GPTQ-4bit-128g.no-act.order"
85
+
86
+ use_triton = False
87
+
88
+ tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
89
+
90
+ model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
91
+ model_basename=model_basename,
92
+ use_safetensors=True,
93
+ trust_remote_code=True,
94
+ device_map='auto',
95
+ use_triton=use_triton,
96
+ quantize_config=None)
97
+
98
+ model.seqlen = 8192
99
+
100
+ # Note: check the prompt template is correct for this model.
101
+ prompt = "Tell me about AI"
102
+ prompt_template=f'''USER: {prompt}
103
+ ASSISTANT:'''
104
+
105
+ print("\n\n*** Generate:")
106
+
107
+ input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
108
+ output = model.generate(inputs=input_ids, temperature=0.7, max_new_tokens=512)
109
+ print(tokenizer.decode(output[0]))
110
+
111
+ # Inference can also be done using transformers' pipeline
112
+
113
+ # Prevent printing spurious transformers error when using pipeline with AutoGPTQ
114
+ logging.set_verbosity(logging.CRITICAL)
115
+
116
+ print("*** Pipeline:")
117
+ pipe = pipeline(
118
+ "text-generation",
119
+ model=model,
120
+ tokenizer=tokenizer,
121
+ max_new_tokens=512,
122
+ temperature=0.7,
123
+ top_p=0.95,
124
+ repetition_penalty=1.15
125
+ )
126
+
127
+ print(pipe(prompt_template)[0]['generated_text'])
128
+ ```
129
+
130
+ ## Using other UIs: monkey patch
131
+
132
+ Provided in the repo is `llama_rope_scaled_monkey_patch.py`, written by @kaiokendev.
133
+
134
+ It can theoretically be added to any Python UI or custom code to achieve the same result as `trust_remote_code=True`. I have not tested this, and it should be superseded by using `trust_remote_code=True`, but I include it for completeness and for interest.
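As a hedged usage sketch (not documented in this card beyond the file itself): the module added later in this commit exposes `replace_llama_rope_with_scaled_rope()`, which would need to be called before the model is constructed.

```python
# Sketch, assuming llama_rope_scaled_monkey_patch.py (added in this commit) is on the import path.
# Not needed when using ExLlama or when loading with trust_remote_code=True.
from llama_rope_scaled_monkey_patch import replace_llama_rope_with_scaled_rope

# Swap transformers' LlamaRotaryEmbedding for the scaled variant *before* building the model.
replace_llama_rope_with_scaled_rope()

# ...then load the model as usual, e.g. with AutoGPTQ as in the example above.
```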
135
+
136
+ ## Provided files
137
+
138
+ **pygmalion-13b-superhot-8k-GPTQ-4bit-128g.no-act.order.safetensors**
139
+
140
+ This will work with AutoGPTQ, ExLlama, and CUDA versions of GPTQ-for-LLaMa. There are reports of issues with Triton mode of recent GPTQ-for-LLaMa. If you have issues, please use AutoGPTQ instead.
141
+
142
+ It was created with group_size 128 to increase inference accuracy, but without --act-order (desc_act) to increase compatibility and improve inference speed.
143
+
144
+ * `pygmalion-13b-superhot-8k-GPTQ-4bit-128g.no-act.order.safetensors`
145
+ * Works for use with ExLlama with increased context (4096 or 8192)
146
+ * Works with AutoGPTQ in Python code, including with increased context, if `trust_remote_code=True` is set.
147
+ * Should work with GPTQ-for-LLaMa in CUDA mode, but unknown if increased context works - TBC. May have issues with GPTQ-for-LLaMa Triton mode.
148
+ * Works with text-generation-webui, including one-click-installers.
149
+ * Parameters: Groupsize = 128. Act Order / desc_act = False.
150
+
151
+ <!-- footer start -->
152
+ <!-- 200823 -->
153
+ ## Discord
154
+
155
+ For further support, and discussions on these models and AI in general, join us at:
156
+
157
+ [TheBloke AI's Discord server](https://discord.gg/theblokeai)
158
+
159
+ ## Thanks, and how to contribute.
160
+
161
+ Thanks to the [chirper.ai](https://chirper.ai) team!
162
+
163
+ I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
164
+
165
+ If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.
166
+
167
+ Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.
168
+
169
+ * Patreon: https://patreon.com/TheBlokeAI
170
+ * Ko-Fi: https://ko-fi.com/TheBlokeAI
171
+
172
+ **Special thanks to**: Aemon Algiz.
173
+
174
+ **Patreon special mentions**: Sam, theTransient, Jonathan Leane, Steven Wood, webtim, Johann-Peter Hartmann, Geoffrey Montalvo, Gabriel Tamborski, Willem Michiel, John Villwock, Derek Yates, Mesiah Bishop, Eugene Pentland, Pieter, Chadd, Stephen Murray, Daniel P. Andersen, terasurfer, Brandon Frisco, Thomas Belote, Sid, Nathan LeClaire, Magnesian, Alps Aficionado, Stanislav Ovsiannikov, Alex, Joseph William Delisle, Nikolai Manek, Michael Davis, Junyu Yang, K, J, Spencer Kim, Stefan Sabev, Olusegun Samson, transmissions 11, Michael Levine, Cory Kujawski, Rainer Wilmers, zynix, Kalila, Luke @flexchar, Ajan Kanaga, Mandus, vamX, Ai Maven, Mano Prime, Matthew Berman, subjectnull, Vitor Caleffi, Clay Pascal, biorpg, alfie_i, 阿明, Jeffrey Morgan, ya boyyy, Raymond Fosdick, knownsqashed, Olakabola, Leonard Tan, ReadyPlayerEmma, Enrico Ros, Dave, Talal Aujan, Illia Dulskyi, Sean Connelly, senxiiz, Artur Olbinski, Elle, Raven Klaugh, Fen Risland, Deep Realms, Imad Khwaja, Fred von Graf, Will Dee, usrbinkat, SuperWojo, Alexandros Triantafyllidis, Swaroop Kallakuri, Dan Guido, John Detwiler, Pedro Madruga, Iucharbius, Viktor Bowallius, Asp the Wyvern, Edmond Seymore, Trenton Dambrowitz, Space Cruiser, Spiking Neurons AB, Pyrater, LangChain4j, Tony Hughes, Kacper Wikieł, Rishabh Srivastava, David Ziegler, Luke Pendergrass, Andrey, Gabriel Puliatti, Lone Striker, Sebastain Graf, Pierre Kircher, Randy H, NimbleBox.ai, Vadim, danny, Deo Leter
175
+
176
+
177
+ Thank you to all my generous patrons and donaters!
178
+
179
+ And thank you again to a16z for their generous grant.
180
+
181
+ <!-- footer end -->
182
+
183
+ # Original model card: Kaio Ken's SuperHOT 8K
184
+
185
+ ### SuperHOT Prototype 2 w/ 8K Context
186
+
187
+ This is a second prototype of SuperHOT, this time 30B with 8K context and no RLHF, using the same technique described in [the github blog](https://kaiokendev.github.io/til#extending-context-to-8k).
188
+ Tests have shown that the model does indeed leverage the extended context at 8K.
189
+
190
+ You will need to **use either the monkeypatch** or, if you are already using the monkeypatch, **change the scaling factor to 0.25 and the maximum sequence length to 8192** (the scaling factor is the ratio of the original 2048-token context to the extended 8192-token context: 2048 / 8192 = 0.25).
191
+
192
+ #### Looking for Merged & Quantized Models?
193
+ - 30B 4-bit CUDA: [tmpupload/superhot-30b-8k-4bit-safetensors](https://huggingface.co/tmpupload/superhot-30b-8k-4bit-safetensors)
194
+ - 30B 4-bit CUDA 128g: [tmpupload/superhot-30b-8k-4bit-128g-safetensors](https://huggingface.co/tmpupload/superhot-30b-8k-4bit-128g-safetensors)
195
+
196
+
197
+ #### Training Details
198
+ I trained the LoRA with the following configuration:
199
+ - 1200 samples (~400 samples over 2048 sequence length)
200
+ - learning rate of 3e-4
201
+ - 3 epochs
202
+ - The exported modules are:
203
+ - q_proj
204
+ - k_proj
205
+ - v_proj
206
+ - o_proj
207
+ - no bias
208
+ - Rank = 4
209
+ - Alpha = 8
210
+ - no dropout
211
+ - weight decay of 0.1
212
+ - AdamW beta1 of 0.9 and beta2 0.99, epsilon of 1e-5
213
+ - Trained on 4-bit base model
214
+
215
+ # Original model card: TehVenom's merge of PygmalionAI's Pygmalion 13B
216
+
217
+ <h1 style="text-align: center">Pygmalion 13b</h1>
218
+ <h2 style="text-align: center">A conversational LLaMA fine-tune.</h2>
219
+
220
+ ## Model Details:
221
+
222
+ Pygmalion 13b is a dialogue model based on Meta's LLaMA-13b.
223
+
224
+ This is version 1. It has been fine-tuned using a subset of the data from Pygmalion-6B-v8-pt4,
225
+ for those of you familiar with the project.
226
+
227
+ The current Pygmalion-13b has been trained as a LoRA, then merged down to the base model for distribution.
228
+
229
+ ## Applying the XORs
230
+
231
+ This model has the XOR files pre-applied out of the box.
+ Converted from the XOR weights from PygmalionAI's release: https://huggingface.co/PygmalionAI/pygmalion-13b
233
+
234
+ ## Prompting
235
+
236
+ The model was trained on the usual Pygmalion persona + chat format, so any of the usual UIs should already handle everything correctly. If you're using the model directly, this is the expected formatting:
237
+
238
+ ```
239
+ [CHARACTER]'s Persona: [A few sentences about the character you want the model to play]
240
+ <START>
241
+ [DIALOGUE HISTORY]
242
+ You: [User's input message here]
243
+ [CHARACTER]:
244
+ ```
245
+
246
+ Where `[CHARACTER]` is, as you can probably guess, the name of the character you want the model to portray, `<START>` should be used verbatim as a delimiter token to separate persona and scenario data from the dialogue, and `[DIALOGUE HISTORY]` is a sliding window of chat history so the model can have conversational context to draw from. Here's a concrete example:
247
+
248
+ ```
249
+ Assistant's Persona: Assistant is a highly intelligent language model trained to comply with user requests.
250
+ <START>
251
+ Assistant: Hello! How may I help you today?
252
+ You: What is Zork?
253
+ Assistant:
254
+ ```
255
+
256
+ Which will generate something like:
257
+
258
+ ```
259
+ Zork is an interactive fiction computer game created in the 1970s by Infocom, Inc., which was later acquired by Activision Blizzard. It is widely considered one of the most influential games ever made and has been credited with popularizing text-based adventure games. The original version of Zork was written in the programming language MACRO-10, but it was ported to many other platforms over the years."
260
+ ```
261
+
262
+ The model will automatically emit an end-of-text token (`</s>`) when it judges that the response is complete.
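To make the format above concrete, here is a hedged sketch (not from the original card) of assembling a prompt in this layout from a persona, a chat history, and the latest user message; the helper name is illustrative only.

```python
# Illustrative helper: build a Pygmalion-style prompt string in the format described above.
def build_prompt(character, persona, history, user_message):
    lines = [f"{character}'s Persona: {persona}", "<START>"]
    lines.extend(history)                 # prior dialogue lines, e.g. "Assistant: Hello! ..."
    lines.append(f"You: {user_message}")
    lines.append(f"{character}:")         # the model continues from here
    return "\n".join(lines)

prompt = build_prompt(
    "Assistant",
    "Assistant is a highly intelligent language model trained to comply with user requests.",
    ["Assistant: Hello! How may I help you today?"],
    "What is Zork?",
)
print(prompt)
```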
263
+
264
+ ## Eval / Benchmark scores
265
+
266
+
267
+ Current evals out of the Pygmalion-13b model: <br>
268
+ <html>
269
+ <head>
270
+ <style>
271
+ table {
272
+ border:1px solid #b3adad;
273
+ border-collapse:collapse;
274
+ padding:5px;
275
+ }
276
+ table th {
277
+ border:1px solid #b3adad;
278
+ padding:5px;
279
+ background: #f0f0f0;
280
+ color: #313030;
281
+ }
282
+ table td {
283
+ border:1px solid #b3adad;
284
+ text-align:center;
285
+ padding:5px;
286
+ background: #ffffff;
287
+ color: #313030;
288
+ }
289
+ </style>
290
+ </head>
291
+ <body>
292
+ <table>
293
+ <thead>
294
+ <tr>
295
+ <th>Model:</th>
296
+ <th>Wikitext2</th>
297
+ <th>Ptb-New</th>
298
+ <th>C4-New</th>
299
+ </tr>
300
+ </thead>
301
+ <tbody>
302
+ <tr>
303
+ <td>Pygmalion 13b - 16bit</td>
304
+ <td>5.710726737976074</td>
305
+ <td>23.633684158325195</td>
306
+ <td>7.6324849128723145</td>
307
+ </tr>
308
+ </tbody>
309
+ </table>
310
+ </body>
311
+ </html>
312
+ <br>Thanks to YellowRose#1776 for the numbers.
313
+ <hr>
314
+
315
+ ## Other notes
316
+
317
+ - When prompted correctly, the model will always start by generating a BOS token. This behavior is an accidental side-effect which we plan to address in future model versions and should not be relied upon.
318
+ - The model was trained as a LoRA with a somewhat unorthodox configuration which causes errors when used with the current version of `peft`, hence we release it as a full model instead.
319
+
320
+
321
+ ## Limitations and biases
322
+
323
+ The intended use-case for this model is fictional conversation for entertainment purposes. Any other sort of usage is out of scope.
324
+
325
+ As such, it was **not** fine-tuned to be safe and harmless: the base model _and_ this fine-tune have been trained on data known to contain profanity and texts that are lewd or otherwise offensive. It may produce socially unacceptable or undesirable text, even if the prompt itself does not include anything explicitly offensive. Outputs might often be factually wrong or misleading.
models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ/config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "_name_or_path": "/workspace/superhot_process/pygmalion-13b/source",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "badwordsids": [
+ [
+ 0
+ ]
+ ],
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "initializer_range": 0.02,
+ "intermediate_size": 13824,
+ "max_position_embeddings": 8192,
+ "max_sequence_length": 2048,
+ "model_type": "llama",
+ "num_attention_heads": 40,
+ "num_hidden_layers": 40,
+ "pad_token_id": 0,
+ "rms_norm_eps": 1e-06,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float16",
+ "transformers_version": "4.30.0.dev0",
+ "use_cache": true,
+ "vocab_size": 32000,
+ "auto_map": {
+ "AutoModel": "modelling_llama.LlamaModel",
+ "AutoModelForCausalLM": "modelling_llama.LlamaForCausalLM",
+ "AutoModelForSequenceClassification": "modelling_llama.LlamaForSequenceClassification"
+ },
+ "quantization_config": {
+ "bits": 4,
+ "group_size": 128,
+ "damp_percent": 0.01,
+ "desc_act": false,
+ "sym": true,
+ "true_sequential": true,
+ "model_file_base_name": "model",
+ "quant_method": "gptq"
+ }
+ }
models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "pad_token_id": 0,
+ "transformers_version": "4.30.0.dev0"
+ }
models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ/huggingface-metadata.txt ADDED
@@ -0,0 +1,6 @@
+ url: https://huggingface.co/TheBloke/Pygmalion-13B-SuperHOT-8K-GPTQ
+ branch: main
+ download date: 2023-09-08 13:41:43
+ sha256sum:
+ 988a7c1a954367afea66d96278d90abfbce752f027978a8bdf12524805a421a1 model.safetensors
+ 9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 tokenizer.model
models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ/llama_rope_scaled_monkey_patch.py ADDED
@@ -0,0 +1,65 @@
+ import torch
+ import transformers
+ import transformers.models.llama.modeling_llama
+ from einops import rearrange
+ import random
+
+ # This monkey patch file is not needed if using ExLlama, or if using `trust_remote_code=True`
+
+ class ScaledRotaryEmbedding(torch.nn.Module):
+     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+         super().__init__()
+         inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
+         self.register_buffer("inv_freq", inv_freq)
+
+         max_position_embeddings = 8192
+
+         # Build here to make `torch.jit.trace` work.
+         self.max_seq_len_cached = max_position_embeddings
+         t = torch.arange(
+             self.max_seq_len_cached,
+             device=self.inv_freq.device,
+             dtype=self.inv_freq.dtype,
+         )
+
+         self.scale = 1 / 4
+         t *= self.scale
+
+         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+         # Different from paper, but it uses a different permutation in order to obtain the same calculation
+         emb = torch.cat((freqs, freqs), dim=-1)
+         self.register_buffer(
+             "cos_cached", emb.cos()[None, None, :, :], persistent=False
+         )
+         self.register_buffer(
+             "sin_cached", emb.sin()[None, None, :, :], persistent=False
+         )
+
+     def forward(self, x, seq_len=None):
+         # x: [bs, num_attention_heads, seq_len, head_size]
+         # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
+         if seq_len > self.max_seq_len_cached:
+             self.max_seq_len_cached = seq_len
+             t = torch.arange(
+                 self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype
+             )
+             t *= self.scale
+             freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+             # Different from paper, but it uses a different permutation in order to obtain the same calculation
+             emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
+             self.register_buffer(
+                 "cos_cached", emb.cos()[None, None, :, :], persistent=False
+             )
+             self.register_buffer(
+                 "sin_cached", emb.sin()[None, None, :, :], persistent=False
+             )
+         return (
+             self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
+             self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
+         )
+
+
+ def replace_llama_rope_with_scaled_rope():
+     transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = (
+         ScaledRotaryEmbedding
+     )
models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:988a7c1a954367afea66d96278d90abfbce752f027978a8bdf12524805a421a1
+ size 7454797216
models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ/modelling_llama.py ADDED
@@ -0,0 +1,894 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch LLaMA model."""
21
+ import math
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import torch
25
+ import torch.utils.checkpoint
26
+ from torch import nn
27
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
31
+ from transformers.modeling_utils import PreTrainedModel
32
+ from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
33
+ from transformers.models.llama.modeling_llama import LlamaConfig
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+ _CONFIG_FOR_DOC = "LlamaConfig"
38
+
39
+
40
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
41
+ def _make_causal_mask(
42
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
43
+ ):
44
+ """
45
+ Make causal mask used for bi-directional self-attention.
46
+ """
47
+ bsz, tgt_len = input_ids_shape
48
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
49
+ mask_cond = torch.arange(mask.size(-1), device=device)
50
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
51
+ mask = mask.to(dtype)
52
+
53
+ if past_key_values_length > 0:
54
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
55
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
56
+
57
+
58
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
59
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
60
+ """
61
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
62
+ """
63
+ bsz, src_len = mask.size()
64
+ tgt_len = tgt_len if tgt_len is not None else src_len
65
+
66
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
67
+
68
+ inverted_mask = 1.0 - expanded_mask
69
+
70
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
71
+
72
+
73
+ class LlamaRMSNorm(nn.Module):
74
+ def __init__(self, hidden_size, eps=1e-6):
75
+ """
76
+ LlamaRMSNorm is equivalent to T5LayerNorm
77
+ """
78
+ super().__init__()
79
+ self.weight = nn.Parameter(torch.ones(hidden_size))
80
+ self.variance_epsilon = eps
81
+
82
+ def forward(self, hidden_states):
83
+ input_dtype = hidden_states.dtype
84
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
85
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
86
+
87
+ return (self.weight * hidden_states).to(input_dtype)
88
+
89
+
90
+ class LlamaRotaryEmbedding(torch.nn.Module):
91
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, scale=1, device=None):
92
+ super().__init__()
93
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
94
+ self.register_buffer("inv_freq", inv_freq)
95
+
96
+ # Build here to make `torch.jit.trace` work.
97
+ self.max_seq_len_cached = max_position_embeddings
98
+ t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
99
+
100
+ self.scale = scale
101
+ t *= self.scale
102
+
103
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
104
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
105
+ emb = torch.cat((freqs, freqs), dim=-1)
106
+ dtype = torch.get_default_dtype()
107
+ self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
108
+ self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
109
+
110
+ def forward(self, x, seq_len=None):
111
+ # x: [bs, num_attention_heads, seq_len, head_size]
112
+ # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
113
+ if seq_len > self.max_seq_len_cached:
114
+ self.max_seq_len_cached = seq_len
115
+ t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
116
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
117
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
118
+ emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
119
+ self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(x.dtype), persistent=False)
120
+ self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(x.dtype), persistent=False)
121
+ return (
122
+ self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
123
+ self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
124
+ )
125
+
126
+
127
+ def rotate_half(x):
128
+ """Rotates half the hidden dims of the input."""
129
+ x1 = x[..., : x.shape[-1] // 2]
130
+ x2 = x[..., x.shape[-1] // 2 :]
131
+ return torch.cat((-x2, x1), dim=-1)
132
+
133
+
134
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
135
+ # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
136
+ cos = cos.squeeze(1).squeeze(0) # [seq_len, dim]
137
+ sin = sin.squeeze(1).squeeze(0) # [seq_len, dim]
138
+ cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
139
+ sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
140
+ q_embed = (q * cos) + (rotate_half(q) * sin)
141
+ k_embed = (k * cos) + (rotate_half(k) * sin)
142
+ return q_embed, k_embed
143
+
144
+
145
+ class LlamaMLP(nn.Module):
146
+ def __init__(
147
+ self,
148
+ hidden_size: int,
149
+ intermediate_size: int,
150
+ hidden_act: str,
151
+ ):
152
+ super().__init__()
153
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
154
+ self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
155
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
156
+ self.act_fn = ACT2FN[hidden_act]
157
+
158
+ def forward(self, x):
159
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
160
+
161
+
162
+ class LlamaAttention(nn.Module):
163
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
164
+
165
+ def __init__(self, config: LlamaConfig):
166
+ super().__init__()
167
+ self.config = config
168
+ self.hidden_size = config.hidden_size
169
+ self.num_heads = config.num_attention_heads
170
+ self.head_dim = self.hidden_size // self.num_heads
171
+ self.max_position_embeddings = config.max_position_embeddings
172
+ self.position_embeddings_scale = 2048 / self.max_position_embeddings
173
+
174
+ if (self.head_dim * self.num_heads) != self.hidden_size:
175
+ raise ValueError(
176
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
177
+ f" and `num_heads`: {self.num_heads})."
178
+ )
179
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
180
+ self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
181
+ self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
182
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
183
+ self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings, scale=self.position_embeddings_scale)
184
+
185
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
186
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
187
+
188
+ def forward(
189
+ self,
190
+ hidden_states: torch.Tensor,
191
+ attention_mask: Optional[torch.Tensor] = None,
192
+ position_ids: Optional[torch.LongTensor] = None,
193
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
194
+ output_attentions: bool = False,
195
+ use_cache: bool = False,
196
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
197
+ bsz, q_len, _ = hidden_states.size()
198
+
199
+ query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
200
+ key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
201
+ value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
202
+
203
+ kv_seq_len = key_states.shape[-2]
204
+ if past_key_value is not None:
205
+ kv_seq_len += past_key_value[0].shape[-2]
206
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
207
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
208
+ # [bsz, nh, t, hd]
209
+
210
+ if past_key_value is not None:
211
+ # reuse k, v, self_attention
212
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
213
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
214
+
215
+ past_key_value = (key_states, value_states) if use_cache else None
216
+
217
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
218
+
219
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
220
+ raise ValueError(
221
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
222
+ f" {attn_weights.size()}"
223
+ )
224
+
225
+ if attention_mask is not None:
226
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
227
+ raise ValueError(
228
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
229
+ )
230
+ attn_weights = attn_weights + attention_mask
231
+ attn_weights = torch.max(
232
+ attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device)
233
+ )
234
+
235
+ # upcast attention to fp32
236
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
237
+ attn_output = torch.matmul(attn_weights, value_states)
238
+
239
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
240
+ raise ValueError(
241
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
242
+ f" {attn_output.size()}"
243
+ )
244
+
245
+ attn_output = attn_output.transpose(1, 2)
246
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
247
+
248
+ attn_output = self.o_proj(attn_output)
249
+
250
+ if not output_attentions:
251
+ attn_weights = None
252
+
253
+ return attn_output, attn_weights, past_key_value
254
+
255
+
256
+ class LlamaDecoderLayer(nn.Module):
257
+ def __init__(self, config: LlamaConfig):
258
+ super().__init__()
259
+ self.hidden_size = config.hidden_size
260
+ self.self_attn = LlamaAttention(config=config)
261
+ self.mlp = LlamaMLP(
262
+ hidden_size=self.hidden_size,
263
+ intermediate_size=config.intermediate_size,
264
+ hidden_act=config.hidden_act,
265
+ )
266
+ self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
267
+ self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
268
+
269
+ def forward(
270
+ self,
271
+ hidden_states: torch.Tensor,
272
+ attention_mask: Optional[torch.Tensor] = None,
273
+ position_ids: Optional[torch.LongTensor] = None,
274
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
275
+ output_attentions: Optional[bool] = False,
276
+ use_cache: Optional[bool] = False,
277
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
278
+ """
279
+ Args:
280
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
281
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
282
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
283
+ output_attentions (`bool`, *optional*):
284
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
285
+ returned tensors for more detail.
286
+ use_cache (`bool`, *optional*):
287
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
288
+ (see `past_key_values`).
289
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
290
+ """
291
+
292
+ residual = hidden_states
293
+
294
+ hidden_states = self.input_layernorm(hidden_states)
295
+
296
+ # Self Attention
297
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
298
+ hidden_states=hidden_states,
299
+ attention_mask=attention_mask,
300
+ position_ids=position_ids,
301
+ past_key_value=past_key_value,
302
+ output_attentions=output_attentions,
303
+ use_cache=use_cache,
304
+ )
305
+ hidden_states = residual + hidden_states
306
+
307
+ # Fully Connected
308
+ residual = hidden_states
309
+ hidden_states = self.post_attention_layernorm(hidden_states)
310
+ hidden_states = self.mlp(hidden_states)
311
+ hidden_states = residual + hidden_states
312
+
313
+ outputs = (hidden_states,)
314
+
315
+ if output_attentions:
316
+ outputs += (self_attn_weights,)
317
+
318
+ if use_cache:
319
+ outputs += (present_key_value,)
320
+
321
+ return outputs
322
+
323
+
324
+ LLAMA_START_DOCSTRING = r"""
325
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
326
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
327
+ etc.)
328
+
329
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
330
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
331
+ and behavior.
332
+
333
+ Parameters:
334
+ config ([`LlamaConfig`]):
335
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
336
+ load the weights associated with the model, only the configuration. Check out the
337
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
338
+ """
339
+
340
+
341
+ @add_start_docstrings(
342
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
343
+ LLAMA_START_DOCSTRING,
344
+ )
345
+ class LlamaPreTrainedModel(PreTrainedModel):
346
+ config_class = LlamaConfig
347
+ base_model_prefix = "model"
348
+ supports_gradient_checkpointing = True
349
+ _no_split_modules = ["LlamaDecoderLayer"]
350
+ _skip_keys_device_placement = "past_key_values"
351
+ _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
352
+
353
+ def _init_weights(self, module):
354
+ std = self.config.initializer_range
355
+ if isinstance(module, nn.Linear):
356
+ module.weight.data.normal_(mean=0.0, std=std)
357
+ if module.bias is not None:
358
+ module.bias.data.zero_()
359
+ elif isinstance(module, nn.Embedding):
360
+ module.weight.data.normal_(mean=0.0, std=std)
361
+ if module.padding_idx is not None:
362
+ module.weight.data[module.padding_idx].zero_()
363
+
364
+ def _set_gradient_checkpointing(self, module, value=False):
365
+ if isinstance(module, LlamaModel):
366
+ module.gradient_checkpointing = value
367
+
368
+
369
+ LLAMA_INPUTS_DOCSTRING = r"""
370
+ Args:
371
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
372
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
373
+ it.
374
+
375
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
376
+ [`PreTrainedTokenizer.__call__`] for details.
377
+
378
+ [What are input IDs?](../glossary#input-ids)
379
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
380
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
381
+
382
+ - 1 for tokens that are **not masked**,
383
+ - 0 for tokens that are **masked**.
384
+
385
+ [What are attention masks?](../glossary#attention-mask)
386
+
387
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
388
+ [`PreTrainedTokenizer.__call__`] for details.
389
+
390
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
391
+ `past_key_values`).
392
+
393
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
394
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
395
+ information on the default strategy.
396
+
397
+ - 1 indicates the head is **not masked**,
398
+ - 0 indicates the head is **masked**.
399
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
400
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
401
+ config.n_positions - 1]`.
402
+
403
+ [What are position IDs?](../glossary#position-ids)
404
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
405
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
406
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
407
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
408
+
409
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
410
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
411
+
412
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
413
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
414
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
415
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
416
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
417
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
418
+ model's internal embedding lookup matrix.
419
+ use_cache (`bool`, *optional*):
420
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
421
+ `past_key_values`).
422
+ output_attentions (`bool`, *optional*):
423
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
424
+ tensors for more detail.
425
+ output_hidden_states (`bool`, *optional*):
426
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
427
+ more detail.
428
+ return_dict (`bool`, *optional*):
429
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
430
+ """
431
+
432
+
433
+ @add_start_docstrings(
434
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
435
+ LLAMA_START_DOCSTRING,
436
+ )
437
+ class LlamaModel(LlamaPreTrainedModel):
438
+ """
439
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
440
+
441
+ Args:
442
+ config: LlamaConfig
443
+ """
444
+
445
+ def __init__(self, config: LlamaConfig):
446
+ super().__init__(config)
447
+ self.padding_idx = config.pad_token_id
448
+ self.vocab_size = config.vocab_size
449
+
450
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
451
+ self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
452
+ self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
453
+
454
+ self.gradient_checkpointing = False
455
+ # Initialize weights and apply final processing
456
+ self.post_init()
457
+
458
+ def get_input_embeddings(self):
459
+ return self.embed_tokens
460
+
461
+ def set_input_embeddings(self, value):
462
+ self.embed_tokens = value
463
+
464
+ # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
465
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
466
+ # create causal mask
467
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
468
+ combined_attention_mask = None
469
+ if input_shape[-1] > 1:
470
+ combined_attention_mask = _make_causal_mask(
471
+ input_shape,
472
+ inputs_embeds.dtype,
473
+ device=inputs_embeds.device,
474
+ past_key_values_length=past_key_values_length,
475
+ )
476
+
477
+ if attention_mask is not None:
478
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
479
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
480
+ inputs_embeds.device
481
+ )
482
+ combined_attention_mask = (
483
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
484
+ )
485
+
486
+ return combined_attention_mask
487
+
488
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
489
+ def forward(
490
+ self,
491
+ input_ids: torch.LongTensor = None,
492
+ attention_mask: Optional[torch.Tensor] = None,
493
+ position_ids: Optional[torch.LongTensor] = None,
494
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
495
+ inputs_embeds: Optional[torch.FloatTensor] = None,
496
+ use_cache: Optional[bool] = None,
497
+ output_attentions: Optional[bool] = None,
498
+ output_hidden_states: Optional[bool] = None,
499
+ return_dict: Optional[bool] = None,
500
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
501
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
502
+ output_hidden_states = (
503
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
504
+ )
505
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
506
+
507
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
508
+
509
+ # retrieve input_ids and inputs_embeds
510
+ if input_ids is not None and inputs_embeds is not None:
511
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
512
+ elif input_ids is not None:
513
+ batch_size, seq_length = input_ids.shape
514
+ elif inputs_embeds is not None:
515
+ batch_size, seq_length, _ = inputs_embeds.shape
516
+ else:
517
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
518
+
519
+ seq_length_with_past = seq_length
520
+ past_key_values_length = 0
521
+
522
+ if past_key_values is not None:
523
+ past_key_values_length = past_key_values[0][0].shape[2]
524
+ seq_length_with_past = seq_length_with_past + past_key_values_length
525
+
526
+ if position_ids is None:
527
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
528
+ position_ids = torch.arange(
529
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
530
+ )
531
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
532
+ else:
533
+ position_ids = position_ids.view(-1, seq_length).long()
534
+
535
+ if inputs_embeds is None:
536
+ inputs_embeds = self.embed_tokens(input_ids)
537
+ # embed positions
538
+ if attention_mask is None:
539
+ attention_mask = torch.ones(
540
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
541
+ )
542
+ attention_mask = self._prepare_decoder_attention_mask(
543
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
544
+ )
545
+
546
+ hidden_states = inputs_embeds
547
+
548
+ if self.gradient_checkpointing and self.training:
549
+ if use_cache:
550
+ logger.warning_once(
551
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
552
+ )
553
+ use_cache = False
554
+
555
+ # decoder layers
556
+ all_hidden_states = () if output_hidden_states else None
557
+ all_self_attns = () if output_attentions else None
558
+ next_decoder_cache = () if use_cache else None
559
+
560
+ for idx, decoder_layer in enumerate(self.layers):
561
+ if output_hidden_states:
562
+ all_hidden_states += (hidden_states,)
563
+
564
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
565
+
566
+ if self.gradient_checkpointing and self.training:
567
+
568
+ def create_custom_forward(module):
569
+ def custom_forward(*inputs):
570
+ # None for past_key_value
571
+ return module(*inputs, output_attentions, None)
572
+
573
+ return custom_forward
574
+
575
+ layer_outputs = torch.utils.checkpoint.checkpoint(
576
+ create_custom_forward(decoder_layer),
577
+ hidden_states,
578
+ attention_mask,
579
+ position_ids,
580
+ None,
581
+ )
582
+ else:
583
+ layer_outputs = decoder_layer(
584
+ hidden_states,
585
+ attention_mask=attention_mask,
586
+ position_ids=position_ids,
587
+ past_key_value=past_key_value,
588
+ output_attentions=output_attentions,
589
+ use_cache=use_cache,
590
+ )
591
+
592
+ hidden_states = layer_outputs[0]
593
+
594
+ if use_cache:
595
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
596
+
597
+ if output_attentions:
598
+ all_self_attns += (layer_outputs[1],)
599
+
600
+ hidden_states = self.norm(hidden_states)
601
+
602
+ # add hidden states from the last decoder layer
603
+ if output_hidden_states:
604
+ all_hidden_states += (hidden_states,)
605
+
606
+ next_cache = next_decoder_cache if use_cache else None
607
+ if not return_dict:
608
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
609
+ return BaseModelOutputWithPast(
610
+ last_hidden_state=hidden_states,
611
+ past_key_values=next_cache,
612
+ hidden_states=all_hidden_states,
613
+ attentions=all_self_attns,
614
+ )
615
+
616
+
617
+ class LlamaForCausalLM(LlamaPreTrainedModel):
+     _tied_weights_keys = ["lm_head.weight"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = LlamaModel(config)
+
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.model = decoder
+
+     def get_decoder(self):
+         return self.model
+
+     @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
+     @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         r"""
+         Args:
+             labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+                 Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+                 config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+                 (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+         Returns:
+
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, LlamaForCausalLM
+
+         >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
+         >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
+
+         >>> prompt = "Hey, are you conscious? Can you talk to me?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+         ```"""
+
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+         outputs = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         hidden_states = outputs[0]
+         logits = self.lm_head(hidden_states)
+
+         loss = None
+         if labels is not None:
+             # Shift so that tokens < n predict n
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             # Flatten the tokens
+             loss_fct = CrossEntropyLoss()
+             shift_logits = shift_logits.view(-1, self.config.vocab_size)
+             shift_labels = shift_labels.view(-1)
+             # Enable model parallelism
+             shift_labels = shift_labels.to(shift_logits.device)
+             loss = loss_fct(shift_logits, shift_labels)
+
+         if not return_dict:
+             output = (logits,) + outputs[1:]
+             return (loss,) + output if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
+
+     def prepare_inputs_for_generation(
+         self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
+     ):
+         if past_key_values:
+             input_ids = input_ids[:, -1:]
+
+         position_ids = kwargs.get("position_ids", None)
+         if attention_mask is not None and position_ids is None:
+             # create position_ids on the fly for batch generation
+             position_ids = attention_mask.long().cumsum(-1) - 1
+             position_ids.masked_fill_(attention_mask == 0, 1)
+             if past_key_values:
+                 position_ids = position_ids[:, -1].unsqueeze(-1)
+
+         # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+         if inputs_embeds is not None and past_key_values is None:
+             model_inputs = {"inputs_embeds": inputs_embeds}
+         else:
+             model_inputs = {"input_ids": input_ids}
+
+         model_inputs.update(
+             {
+                 "position_ids": position_ids,
+                 "past_key_values": past_key_values,
+                 "use_cache": kwargs.get("use_cache"),
+                 "attention_mask": attention_mask,
+             }
+         )
+         return model_inputs
+
+     @staticmethod
+     def _reorder_cache(past_key_values, beam_idx):
+         reordered_past = ()
+         for layer_past in past_key_values:
+             reordered_past += (
+                 tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+             )
+         return reordered_past
+
+
+ @add_start_docstrings(
+     """
+     The LLaMa Model transformer with a sequence classification head on top (linear layer).
+
+     [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+     (e.g. GPT-2) do.
+
+     Since it does classification on the last token, it requires to know the position of the last token. If a
+     `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+     no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+     padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+     each row of the batch).
+     """,
+     LLAMA_START_DOCSTRING,
+ )
+ class LlamaForSequenceClassification(LlamaPreTrainedModel):
+     _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.num_labels = config.num_labels
+         self.model = LlamaModel(config)
+         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         transformer_outputs = self.model(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         hidden_states = transformer_outputs[0]
+         logits = self.score(hidden_states)
+
+         if input_ids is not None:
+             batch_size = input_ids.shape[0]
+         else:
+             batch_size = inputs_embeds.shape[0]
+
+         if self.config.pad_token_id is None and batch_size != 1:
+             raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+         if self.config.pad_token_id is None:
+             sequence_lengths = -1
+         else:
+             if input_ids is not None:
+                 sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
+             else:
+                 sequence_lengths = -1
+
+         pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+         loss = None
+         if labels is not None:
+             labels = labels.to(logits.device)
+             if self.config.problem_type is None:
+                 if self.num_labels == 1:
+                     self.config.problem_type = "regression"
+                 elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+                     self.config.problem_type = "single_label_classification"
+                 else:
+                     self.config.problem_type = "multi_label_classification"
+
+             if self.config.problem_type == "regression":
+                 loss_fct = MSELoss()
+                 if self.num_labels == 1:
+                     loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+                 else:
+                     loss = loss_fct(pooled_logits, labels)
+             elif self.config.problem_type == "single_label_classification":
+                 loss_fct = CrossEntropyLoss()
+                 loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+             elif self.config.problem_type == "multi_label_classification":
+                 loss_fct = BCEWithLogitsLoss()
+                 loss = loss_fct(pooled_logits, labels)
+         if not return_dict:
+             output = (pooled_logits,) + transformer_outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return SequenceClassifierOutputWithPast(
+             loss=loss,
+             logits=pooled_logits,
+             past_key_values=transformer_outputs.past_key_values,
+             hidden_states=transformer_outputs.hidden_states,
+             attentions=transformer_outputs.attentions,
+         )
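
A note on the pooling logic in `LlamaForSequenceClassification` above: the classification head scores every position, and the forward pass then keeps only the logits at the last non-padding token of each row. The standalone sketch below (not part of the commit; the tensors and pad id are made up for illustration) shows that indexing pattern in isolation:

```python
import torch

# Hypothetical batch: 2 sequences of length 5, scored over 3 labels.
logits = torch.randn(2, 5, 3)                  # (batch_size, seq_length, num_labels)
input_ids = torch.tensor([[11, 12, 13, 0, 0],  # 0 plays the role of pad_token_id here
                          [21, 22, 23, 24, 25]])
pad_token_id = 0

# Index of the last non-pad token in each row, as in the forward pass above.
sequence_lengths = torch.ne(input_ids, pad_token_id).sum(-1) - 1   # tensor([2, 4])
pooled_logits = logits[torch.arange(logits.shape[0]), sequence_lengths]
print(pooled_logits.shape)  # torch.Size([2, 3])
```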
models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ/quantize_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bits": 4,
+   "group_size": 128,
+   "damp_percent": 0.01,
+   "desc_act": false,
+   "sym": true,
+   "true_sequential": true,
+   "model_file_base_name": "model"
+ }
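
The quantize_config.json above records how the GPTQ checkpoint was produced (4-bit weights, group size 128, no act-order) and the base name of the weight file. As a minimal sketch of how such a config is typically consumed, a loader such as the AutoGPTQ library reads it at load time; the directory path, device, and use of AutoGPTQ here are assumptions for illustration, not part of this commit:

```python
from auto_gptq import AutoGPTQForCausalLM  # assumes the auto-gptq package is installed

# Assumed local path, mirroring this repo's models/ layout.
model_dir = "models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ"

# bits / group_size / desc_act are read from quantize_config.json in model_dir;
# model_basename mirrors "model_file_base_name": "model" above.
model = AutoGPTQForCausalLM.from_quantized(
    model_dir,
    model_basename="model",
    device="cuda:0",
)
```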
models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ/special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 2048,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
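
Together with tokenizer.json and tokenizer.model above, special_tokens_map.json and this tokenizer_config.json describe a standard `LlamaTokenizer` with `<s>`/`</s>`/`<unk>` special tokens and a 2048-token `model_max_length`. A small sketch of loading and inspecting it (the path is an assumption based on this commit's layout):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("models/TheBloke_Pygmalion-13B-SuperHOT-8K-GPTQ")

# Values below come from the special token and tokenizer config files in this commit.
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)  # <s> </s> <unk>
print(tokenizer.model_max_length)                                     # 2048

# add_bos_token is true, so encoded inputs start with the BOS id.
ids = tokenizer("Hello there!").input_ids
print(ids[0] == tokenizer.bos_token_id)  # True
```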