Duplicate from cognitivecomputations/dolphin-2_6-phi-2
Co-authored-by: Eric Hartford <[email protected]>
- .gitattributes +35 -0
- LICENSE +71 -0
- README.md +102 -0
- added_tokens.json +42 -0
- config.json +33 -0
- configs/phi-dolphin-qlora.yml +97 -0
- configuration_phi.py +62 -0
- generation_config.json +4 -0
- merges.txt +0 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +334 -0
- modeling_phi.py +967 -0
- pytorch_model-00001-of-00002.bin +3 -0
- pytorch_model-00002-of-00002.bin +3 -0
- pytorch_model.bin.index.json +334 -0
- special_tokens_map.json +30 -0
- tokenizer.json +0 -0
- tokenizer_config.json +341 -0
- vocab.json +0 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
LICENSE
ADDED
@@ -0,0 +1,71 @@
MICROSOFT RESEARCH LICENSE TERMS

IF YOU LIVE IN THE UNITED STATES, PLEASE READ THE “BINDING ARBITRATION AND CLASS ACTION WAIVER” SECTION BELOW. IT AFFECTS HOW DISPUTES ARE RESOLVED.

These license terms are an agreement between you and Microsoft Corporation (or one of its affiliates). They apply to the source code, object code, machine learning models, or data (collectively “Materials”) that accompany this license. IF YOU COMPLY WITH THESE LICENSE TERMS, YOU HAVE THE RIGHTS BELOW. BY USING THE MATERIALS, YOU ACCEPT THESE TERMS.

1) INSTALLATION AND USE RIGHTS TO THE MATERIALS.

Subject to the terms of this agreement, you have the below rights, if applicable, to use the Materials solely for non-commercial, non-revenue generating, research purposes:

a) Source Code. If source code is included, you may use and modify the source code, but you may not distribute the source code.

b) Object Code. If object code is included, you may use the object code, but you may not distribute the object code.

c) Models. If machine learning model(s) are included, you may use the model(s), but you may not distribute the models.

d) Data. If data is included, you may use and modify the data, but your use and modification must be consistent with the consent under which the data was provided and/or gathered and you may not distribute the data or your modifications to the data.

2) SCOPE OF LICENSE. The Materials are licensed, not sold. Microsoft reserves all other rights. Unless applicable law gives you more rights despite this limitation, you will not (and have no right to):

a) work around any technical limitations in the Materials that only allow you to use it in certain ways;

b) reverse engineer, decompile or disassemble the Materials;

c) remove, minimize, block, or modify any notices of Microsoft or its suppliers in the Materials;

d) use the Materials in any way that is against the law or to create or propagate malware; or

e) share, publish, distribute or lend the Materials, provide the Materials as a stand-alone hosted solution for others to use, or transfer the Materials or this agreement to any third party.

3) PERSONAL DATA. If the data (set forth in Section 1(c) above) includes or is found to include any data that enables any ability to identify an individual (“Personal Data”), you will not use such Personal Data for any purpose other than was authorized and consented to by the data subject/research participant. You will not use Personal Data to contact any person. You will keep Personal Data in strict confidence. You will not share any Personal Data that is collected or in your possession with any third party for any reason and as required under the original consent agreement. Further, you will destroy the Personal Data and any backup or copies, immediately upon the completion of your research.

4) LICENSE TO MICROSOFT. Notwithstanding the limitations in Section 1, you may distribute your modifications back to Microsoft, and if you do provide Microsoft with modifications of the Materials, you hereby grant Microsoft, without any restrictions or limitations, a non-exclusive, perpetual, irrevocable, royalty-free, assignable and sub-licensable license, to reproduce, publicly perform or display, install, use, modify, post, distribute, make and have made, sell and transfer such modifications and derivatives for any purpose.

5) PUBLICATION. You may publish (or present papers or articles) on your results from using the Materials provided that no material or substantial portion of the Materials is included in any such publication or presentation.

6) FEEDBACK. Any feedback about the Materials provided by you to us is voluntarily given, and Microsoft shall be free to use the feedback as it sees fit without obligation or restriction of any kind, even if the feedback is designated by you as confidential. Such feedback shall be considered a contribution and licensed to Microsoft under the terms of Section 4 above.

7) EXPORT RESTRICTIONS. You must comply with all domestic and international export laws and regulations that apply to the Materials, which include restrictions on destinations, end users, and end use. For further information on export restrictions, visit (aka.ms/exporting).

8) SUPPORT SERVICES. Microsoft is not obligated under this agreement to provide any support services for the Materials. Any support provided is “as is”, “with all faults”, and without warranty of any kind.

9) BINDING ARBITRATION AND CLASS ACTION WAIVER. This Section applies if you live in (or, if a business, your principal place of business is in) the United States. If you and Microsoft have a dispute, you and Microsoft agree to try for 60 days to resolve it informally. If you and Microsoft can’t, you and Microsoft agree to binding individual arbitration before the American Arbitration Association under the Federal Arbitration Act (“FAA”), and not to sue in court in front of a judge or jury. Instead, a neutral arbitrator will decide. Class action lawsuits, class-wide arbitrations, private attorney-general actions, and any other proceeding where someone acts in a representative capacity are not allowed; nor is combining individual proceedings without the consent of all parties. The complete Arbitration Agreement contains more terms and is at aka.ms/arb-agreement-1. You and Microsoft agree to these terms.

10) ENTIRE AGREEMENT. This agreement, and any other terms Microsoft may provide for supplements, updates, or third-party applications, is the entire agreement for the Materials.

11) APPLICABLE LAW AND PLACE TO RESOLVE DISPUTES. If you acquired the Materials in the United States or Canada, the laws of the state or province where you live (or, if a business, where your principal place of business is located) govern the interpretation of this agreement, claims for its breach, and all other claims (including consumer protection, unfair competition, and tort claims), regardless of conflict of laws principles, except that the FAA governs everything related to arbitration. If you acquired the Materials in any other country, its laws apply, except that the FAA governs everything related to arbitration. If U.S. federal jurisdiction exists, you and Microsoft consent to exclusive jurisdiction and venue in the federal court in King County, Washington for all disputes heard in court (excluding arbitration). If not, you and Microsoft consent to exclusive jurisdiction and venue in the Superior Court of King County, Washington for all disputes heard in court (excluding arbitration).

12) CONSUMER RIGHTS; REGIONAL VARIATIONS. This agreement describes certain legal rights. You may have other rights, including consumer rights, under the laws of your state, province, or country. Separate and apart from your relationship with Microsoft, you may also have rights with respect to the party from which you acquired the Materials. This agreement does not change those other rights if the laws of your state, province, or country do not permit it to do so. For example, if you acquired the Materials in one of the below regions, or mandatory country law applies, then the following provisions apply to you:

a) Australia. You have statutory guarantees under the Australian Consumer Law and nothing in this agreement is intended to affect those rights.

b) Canada. If you acquired this software in Canada, you may stop receiving updates by turning off the automatic update feature, disconnecting your device from the Internet (if and when you re-connect to the Internet, however, the Materials will resume checking for and installing updates), or uninstalling the Materials. The product documentation, if any, may also specify how to turn off updates for your specific device or software.

c) Germany and Austria.

i. Warranty. The properly licensed software will perform substantially as described in any Microsoft materials that accompany the Materials. However, Microsoft gives no contractual guarantee in relation to the licensed software.

ii. Limitation of Liability. In case of intentional conduct, gross negligence, claims based on the Product Liability Act, as well as, in case of death or personal or physical injury, Microsoft is liable according to the statutory law.

Subject to the foregoing clause (ii), Microsoft will only be liable for slight negligence if Microsoft is in breach of such material contractual obligations, the fulfillment of which facilitate the due performance of this agreement, the breach of which would endanger the purpose of this agreement and the compliance with which a party may constantly trust in (so-called "cardinal obligations"). In other cases of slight negligence, Microsoft will not be liable for slight negligence.

13) DISCLAIMER OF WARRANTY. THE MATERIALS ARE LICENSED “AS IS.” YOU BEAR THE RISK OF USING THEM. MICROSOFT GIVES NO EXPRESS WARRANTIES, GUARANTEES, OR CONDITIONS. TO THE EXTENT PERMITTED UNDER APPLICABLE LAWS, MICROSOFT EXCLUDES ALL IMPLIED WARRANTIES, INCLUDING MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT.

14) LIMITATION ON AND EXCLUSION OF DAMAGES. IF YOU HAVE ANY BASIS FOR RECOVERING DAMAGES DESPITE THE PRECEDING DISCLAIMER OF WARRANTY, YOU CAN RECOVER FROM MICROSOFT AND ITS SUPPLIERS ONLY DIRECT DAMAGES UP TO U.S. $5.00. YOU CANNOT RECOVER ANY OTHER DAMAGES, INCLUDING CONSEQUENTIAL, LOST PROFITS, SPECIAL, INDIRECT OR INCIDENTAL DAMAGES.

This limitation applies to (a) anything related to the Materials, services, content (including code) on third party Internet sites, or third party applications; and (b) claims for breach of contract, warranty, guarantee, or condition; strict liability, negligence, or other tort; or any other claim; in each case to the extent permitted by applicable law.

It also applies even if Microsoft knew or should have known about the possibility of the damages. The above limitation or exclusion may not apply to you because your state, province, or country may not allow the exclusion or limitation of incidental, consequential, or other damages.
README.md
ADDED
@@ -0,0 +1,102 @@
---
license: mit
license_name: microsoft-research-license
license_link: LICENSE
datasets:
- ehartford/dolphin
- jondurbin/airoboros-2.2.1
- ehartford/dolphin-coder
- teknium/openhermes
- ise-uiuc/Magicoder-OSS-Instruct-75K
- ise-uiuc/Magicoder-Evol-Instruct-110K
- LDJnr/Capybara
language:
- en
---

Dolphin 2.6 Phi-2 🐬

Eric Hartford and Fernando Fernandes

Join our Discord: https://discord.gg/vT3sktQ3zb

<img src="https://cdn-uploads.huggingface.co/production/uploads/63111b2d88942700629f5771/ldkN1J0WIDQwU4vutGYiD.png" width="600" />

This model's training was sponsored by [convai](https://www.convai.com/).

This model is based on [Phi-2](https://huggingface.co/microsoft/phi-2) and is governed by the MIT license.

trust_remote_code is required.
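A minimal loading sketch (assuming a recent transformers release; the repo id is the upstream one named in the commit header):

```
from transformers import AutoModelForCausalLM, AutoTokenizer

# trust_remote_code=True makes transformers import the bundled
# configuration_phi.py / modeling_phi.py from this repo.
model_id = "cognitivecomputations/dolphin-2_6-phi-2"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",  # config.json declares float16 weights
    trust_remote_code=True,
)
```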
New in 2.6
- Fixed a training configuration issue that improved the quality a lot
- Due to popular demand, added back samantha-based empathy data
- Replaced synthia and pure-dove with Capybara

This model is uncensored. I have filtered the dataset to remove alignment and bias. This makes the model more compliant. You are advised to implement your own alignment layer before exposing the model as a service. It will be highly compliant with any requests, even unethical ones. Please read my blog post about uncensored models: https://erichartford.com/uncensored-models
You are responsible for any content you create using this model. Enjoy responsibly.

## Training

It took 2 days to train 3 epochs on 4x A100s using qLoRA and Axolotl.

Prompt format: this model uses the ChatML prompt format.

```
<|im_start|>system
You are Dolphin, a helpful AI assistant.<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant
```
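A sketch of driving that template programmatically, reusing the model and tokenizer from the loading example above (the question is just a placeholder; the <|im_end|> id comes from added_tokens.json later in this commit):

```
prompt = (
    "<|im_start|>system\n"
    "You are Dolphin, a helpful AI assistant.<|im_end|>\n"
    "<|im_start|>user\n"
    "Why are dolphins so intelligent?<|im_end|>\n"
    "<|im_start|>assistant\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
# Stop on <|im_end|> (id 50295) instead of the default <|endoftext|>.
outputs = model.generate(
    **inputs,
    max_new_tokens=256,
    eos_token_id=tokenizer.convert_tokens_to_ids("<|im_end|>"),
)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:]))
```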
Example:
```
<|im_start|>system
You are an AI assistant expert at dolphin training<|im_end|>
<|im_start|>user
Please give ideas and a detailed plan about how to assemble and train an army of dolphin companions to swim me anywhere I want to go and protect me from my enemies and bring me fish to eat.<|im_end|>
<|im_start|>assistant
Assembling, training, and utilizing dolphins as your companions for transportation, protection, and fishing is no small task. However, with careful planning and execution, it can be accomplished. Here's a detailed guide on how to achieve this:

1. **Acquiring Dolphins**: Finding dolphins isn't easy, but you could potentially find them in the wild or at aquariums. For ethical reasons, we suggest acquiring adult dolphins that have been born into captivity. They may not have had as much exposure to humans as their wild counterparts, which makes them easier to handle.

2. **Creating a Safe Environment**: Build a large pool or enclosure where these dolphins can live comfortably while you train them. It should mimic their natural environment as closely as possible. Include elements like rocks (for resting), seaweed (for hiding), and water currents (to simulate ocean conditions).

3. **Training Sessions**: Begin by teaching basic commands such as "come", "stay", "sit", "roll over". Use positive reinforcement methods like treats and praise to encourage good behavior. Be patient; dolphins are intelligent creatures who need time to understand what they're being asked to do.

4. **Transportation Training**: Once your dolphins have mastered basic commands, start integrating travel into the mix. Place floating objects around the pool to represent different destinations. The goal here would be to get your dolphins to recognize these items and associate them with specific locations. Start with short trips and gradually increase the distance.

5. **Protection Skills**: To teach them protective skills, introduce simulated threats into the pool. This might involve using inflatable obstacles or having another trained animal pretend to attack. The dolphins will learn to react appropriately to these situations.

6. **Fishing Skills**: Teaching dolphins to catch fish involves introducing nets or toys into the pool and encouraging them to use their mouths to retrieve the object. Reward them when they successfully catch something.

7. **Socialization**: Dolphins thrive in social environments so ensure they spend plenty of time interacting with each other during their training sessions. This can help reduce stress levels and make training more effective.

8. **Dietary Needs**: Dolphins require a diet high in protein, fats, and minerals. Feed them fish rich in these nutrients several times per day. If necessary, consult with a marine biologist or veterinarian experienced in aquatic diets for advice.
```

## Gratitude
- This model was made possible by the generous sponsorship of [Convai](https://www.convai.com/).
- Huge thank you to Microsoft for training and publishing the weights of Phi-2
- Thank you to Microsoft for authoring the Orca paper and inspiring this work.
- HUGE Thank you to the dataset authors: @jondurbin, @ise-uiuc, @teknium, @LDJnr and @migtissera
- And HUGE thanks to @winglian and the Axolotl contributors for making the best training framework!
- [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
- Thank you to all the other people in the Open Source AI community who have taught me and helped me along the way.

## Example Output

![image/png](https://cdn-uploads.huggingface.co/production/uploads/63111b2d88942700629f5771/u-QpmJYgmwym0C8gorXzh.png)

## Future Plans
Dolphin 3.0 dataset is in progress, and will include:
- enhanced general chat use-cases
- enhanced structured output
- enhanced Agent cases like Autogen, Memgpt, Functions
- enhanced role-playing

[If you would like to financially support my efforts](https://ko-fi.com/erichartford)

[swag](https://fa7113.myshopify.com/)
added_tokens.json
ADDED
@@ -0,0 +1,42 @@
{
  "\t\t": 50294,
  "\t\t\t": 50293,
  "\t\t\t\t": 50292,
  "\t\t\t\t\t": 50291,
  "\t\t\t\t\t\t": 50290,
  "\t\t\t\t\t\t\t": 50289,
  "\t\t\t\t\t\t\t\t": 50288,
  "\t\t\t\t\t\t\t\t\t": 50287,
  "  ": 50286,
  "   ": 50285,
  "    ": 50284,
  "     ": 50283,
  "      ": 50282,
  "       ": 50281,
  "        ": 50280,
  "         ": 50279,
  "          ": 50278,
  "           ": 50277,
  "            ": 50276,
  "             ": 50275,
  "              ": 50274,
  "               ": 50273,
  "                ": 50272,
  "                 ": 50271,
  "                  ": 50270,
  "                   ": 50269,
  "                    ": 50268,
  "                     ": 50267,
  "                      ": 50266,
  "                       ": 50265,
  "                        ": 50264,
  "                         ": 50263,
  "                          ": 50262,
  "                           ": 50261,
  "                            ": 50260,
  "                             ": 50259,
  "                              ": 50258,
  "                               ": 50257,
  "<|im_end|>": 50295,
  "<|im_start|>": 50296
}
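The tab and space run-length tokens above are inherited from the upstream Phi-2 (CodeGen-style) tokenizer; only the two ChatML markers at the end are Dolphin additions. A quick sanity check against these ids, reusing the tokenizer loaded in the README example:

```
assert tokenizer.convert_tokens_to_ids("<|im_start|>") == 50296
assert tokenizer.convert_tokens_to_ids("<|im_end|>") == 50295
# Each ChatML marker is a single added token, so the template costs one
# token per tag rather than being split into sub-word pieces.
print(tokenizer.tokenize("<|im_start|>user"))  # e.g. ['<|im_start|>', 'user']
```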
config.json
ADDED
@@ -0,0 +1,33 @@
{
  "_name_or_path": "microsoft/phi-2",
  "activation_function": "gelu_new",
  "architectures": [
    "PhiForCausalLM"
  ],
  "attn_pdrop": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_phi.PhiConfig",
    "AutoModelForCausalLM": "modeling_phi.PhiForCausalLM"
  },
  "embd_pdrop": 0.0,
  "flash_attn": false,
  "flash_rotary": false,
  "fused_dense": false,
  "img_processor": null,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "model_type": "phi-msft",
  "n_embd": 2560,
  "n_head": 32,
  "n_head_kv": null,
  "n_inner": null,
  "n_layer": 32,
  "n_positions": 2048,
  "resid_pdrop": 0.1,
  "rotary_dim": 32,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.37.0.dev0",
  "use_cache": false,
  "vocab_size": 51200
}
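Two quantities implied by this config are worth spelling out: with n_embd 2560 split across 32 heads, each head is 80-dimensional, and rotary_dim 32 means rotary embeddings cover only the first 32 of those 80 dimensions. As a worked check:

```
n_embd, n_head, rotary_dim = 2560, 32, 32   # values copied from config.json
head_dim = n_embd // n_head                  # 80
rotary_fraction = rotary_dim / head_dim      # 0.4: RoPE on 32 of 80 dims per head
print(head_dim, rotary_fraction)
```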
configs/phi-dolphin-qlora.yml
ADDED
@@ -0,0 +1,97 @@
base_model: microsoft/phi-2
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
is_llama_derived_model: false
trust_remote_code: true

load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: /workspace/datasets/dolphin/dolphin201.jsonl
    type: alpaca_w_system.load_open_orca_chatml
  - path: /workspace/datasets/dolphin-coder-translate.jsonl
    type: alpaca_w_system.load_open_orca_chatml
  - path: /workspace/datasets/dolphin-coder-codegen.jsonl
    type: alpaca_w_system.load_open_orca_chatml
  - path: /workspace/datasets/data-evol_instruct-decontaminated-converted.jsonl
    type: alpaca_w_system.load_open_orca_chatml
  - path: /workspace/datasets/data-oss_instruct-decontaminated-converted.jsonl
    type: alpaca_w_system.load_open_orca_chatml
  - path: /workspace/datasets/CapybaraPure_Decontaminated-converted.jsonl
    type: sharegpt
    conversation: chatml
  - path: /workspace/datasets/not_samantha_norefusals.jsonl
    type: sharegpt
    conversation: chatml
  - path: /workspace/datasets/openhermes.json
    type: alpaca
    prompt_style: chatml

dataset_prepared_path: larp
val_set_size: 0.05
output_dir: /workspace/dolphin-2.6-phi-2/

sequence_len: 2048
sample_packing: true
pad_to_sequence_len: true

adapter: qlora
lora_model_dir:
lora_r: 64
lora_alpha: 32
lora_dropout: 0.05
lora_target_linear: true
lora_fan_in_fan_out:
lora_modules_to_save:
  - embed_tokens
  - lm_head

wandb_project: dolphin
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 16
micro_batch_size: 1
num_epochs: 4
optimizer: paged_adamw_8bit
adam_beta1: 0.9
adam_beta2: 0.999
adam_epsilon: 0.00001
max_grad_norm: 1000.0
lr_scheduler: cosine
learning_rate: 2e-4

train_on_inputs: false
group_by_length:
bf16: false
fp16: true
tf32: false

gradient_checkpointing:
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true

warmup_steps: 5
evals_per_epoch: 0
save_steps: 0.01
save_safetensors: false
save_total_limit: 2
debug:
deepspeed: deepspeed/zero2.json
weight_decay: 0.01
fsdp:
fsdp_config:
resize_token_embeddings_to_32x: true
special_tokens:
  eos_token: "<|im_end|>"
  pad_token: "<|endoftext|>"
tokens:
  - "<|im_start|>"
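Combined with the 4x A100 setup the README mentions (the GPU count comes from the model card, not this file), these batch settings imply the following effective batch size; a quick calculation:

```
micro_batch_size = 1
gradient_accumulation_steps = 16
num_gpus = 4          # per the README's "4x A100s"
sequence_len = 2048

effective_batch = micro_batch_size * gradient_accumulation_steps * num_gpus
tokens_per_step = effective_batch * sequence_len  # upper bound, with sample packing
print(effective_batch, tokens_per_step)           # 64, 131072
```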
configuration_phi.py
ADDED
@@ -0,0 +1,62 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import math
from typing import Optional

from transformers import PretrainedConfig


class PhiConfig(PretrainedConfig):
    """Phi configuration."""

    model_type = "phi-msft"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size: int = 50304,
        n_positions: int = 2048,
        n_embd: int = 1024,
        n_layer: int = 20,
        n_inner: Optional[int] = None,
        n_head: int = 16,
        n_head_kv: Optional[int] = None,
        rotary_dim: Optional[int] = 32,
        activation_function: Optional[str] = "gelu_new",
        flash_attn: bool = False,
        flash_rotary: bool = False,
        fused_dense: bool = False,
        attn_pdrop: float = 0.0,
        embd_pdrop: float = 0.0,
        resid_pdrop: float = 0.0,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        tie_word_embeddings: bool = False,
        pad_vocab_size_multiple: int = 64,
        **kwargs
    ) -> None:
        self.vocab_size = int(math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_inner = n_inner
        self.n_head = n_head
        self.n_head_kv = n_head_kv
        self.rotary_dim = min(rotary_dim, n_embd // n_head)
        self.activation_function = activation_function
        self.flash_attn = flash_attn
        self.flash_rotary = flash_rotary
        self.fused_dense = fused_dense
        self.attn_pdrop = attn_pdrop
        self.embd_pdrop = embd_pdrop
        self.resid_pdrop = resid_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
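The constructor rounds vocab_size up to the next multiple of pad_vocab_size_multiple; the 51200 in config.json is already 64-aligned and passes through unchanged. A small illustration (50297 is one past the highest added-token id, 50296):

```
import math

def pad_vocab(vocab_size: int, multiple: int = 64) -> int:
    # Mirrors the rounding in PhiConfig.__init__ above.
    return int(math.ceil(vocab_size / multiple) * multiple)

print(pad_vocab(50297))  # 50304: a raw tokenizer size would be rounded up
print(pad_vocab(51200))  # 51200: the value in config.json is already aligned
```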
generation_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "_from_model_config": true,
  "transformers_version": "4.37.0.dev0"
}
merges.txt
ADDED
The diff for this file is too large to render.
model-00001-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ebab78005fb6449881f2c2a2f5a1b97ce34e94a5ef0e666ad0101fe75c83124d
size 4982467864
model-00002-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1586f8021b2bfb5690ee6f6ddd5b25c4c8609d6a2dcce55c4258de1f9ed75262
size 583815616
model.safetensors.index.json
ADDED
@@ -0,0 +1,334 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"metadata": {
|
3 |
+
"total_size": 5566248960
|
4 |
+
},
|
5 |
+
"weight_map": {
|
6 |
+
"lm_head.linear.bias": "model-00002-of-00002.safetensors",
|
7 |
+
"lm_head.linear.lora_A.default.weight": "model-00002-of-00002.safetensors",
|
8 |
+
"lm_head.linear.lora_B.default.weight": "model-00002-of-00002.safetensors",
|
9 |
+
"lm_head.linear.weight": "model-00002-of-00002.safetensors",
|
10 |
+
"lm_head.ln.bias": "model-00002-of-00002.safetensors",
|
11 |
+
"lm_head.ln.weight": "model-00002-of-00002.safetensors",
|
12 |
+
"transformer.embd.wte.weight": "model-00001-of-00002.safetensors",
|
13 |
+
"transformer.h.0.ln.bias": "model-00001-of-00002.safetensors",
|
14 |
+
"transformer.h.0.ln.weight": "model-00001-of-00002.safetensors",
|
15 |
+
"transformer.h.0.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
16 |
+
"transformer.h.0.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
17 |
+
"transformer.h.0.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
18 |
+
"transformer.h.0.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
19 |
+
"transformer.h.0.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
20 |
+
"transformer.h.0.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
21 |
+
"transformer.h.0.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
22 |
+
"transformer.h.0.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
23 |
+
"transformer.h.1.ln.bias": "model-00001-of-00002.safetensors",
|
24 |
+
"transformer.h.1.ln.weight": "model-00001-of-00002.safetensors",
|
25 |
+
"transformer.h.1.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
26 |
+
"transformer.h.1.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
27 |
+
"transformer.h.1.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
28 |
+
"transformer.h.1.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
29 |
+
"transformer.h.1.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
30 |
+
"transformer.h.1.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
31 |
+
"transformer.h.1.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
32 |
+
"transformer.h.1.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
33 |
+
"transformer.h.10.ln.bias": "model-00001-of-00002.safetensors",
|
34 |
+
"transformer.h.10.ln.weight": "model-00001-of-00002.safetensors",
|
35 |
+
"transformer.h.10.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
36 |
+
"transformer.h.10.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
37 |
+
"transformer.h.10.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
38 |
+
"transformer.h.10.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
39 |
+
"transformer.h.10.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
40 |
+
"transformer.h.10.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
41 |
+
"transformer.h.10.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
42 |
+
"transformer.h.10.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
43 |
+
"transformer.h.11.ln.bias": "model-00001-of-00002.safetensors",
|
44 |
+
"transformer.h.11.ln.weight": "model-00001-of-00002.safetensors",
|
45 |
+
"transformer.h.11.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
46 |
+
"transformer.h.11.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
47 |
+
"transformer.h.11.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
48 |
+
"transformer.h.11.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
49 |
+
"transformer.h.11.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
50 |
+
"transformer.h.11.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
51 |
+
"transformer.h.11.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
52 |
+
"transformer.h.11.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
53 |
+
"transformer.h.12.ln.bias": "model-00001-of-00002.safetensors",
|
54 |
+
"transformer.h.12.ln.weight": "model-00001-of-00002.safetensors",
|
55 |
+
"transformer.h.12.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
56 |
+
"transformer.h.12.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
57 |
+
"transformer.h.12.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
58 |
+
"transformer.h.12.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
59 |
+
"transformer.h.12.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
60 |
+
"transformer.h.12.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
61 |
+
"transformer.h.12.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
62 |
+
"transformer.h.12.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
63 |
+
"transformer.h.13.ln.bias": "model-00001-of-00002.safetensors",
|
64 |
+
"transformer.h.13.ln.weight": "model-00001-of-00002.safetensors",
|
65 |
+
"transformer.h.13.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
66 |
+
"transformer.h.13.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
67 |
+
"transformer.h.13.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
68 |
+
"transformer.h.13.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
69 |
+
"transformer.h.13.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
70 |
+
"transformer.h.13.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
71 |
+
"transformer.h.13.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
72 |
+
"transformer.h.13.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
73 |
+
"transformer.h.14.ln.bias": "model-00001-of-00002.safetensors",
|
74 |
+
"transformer.h.14.ln.weight": "model-00001-of-00002.safetensors",
|
75 |
+
"transformer.h.14.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
76 |
+
"transformer.h.14.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
77 |
+
"transformer.h.14.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
78 |
+
"transformer.h.14.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
79 |
+
"transformer.h.14.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
80 |
+
"transformer.h.14.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
81 |
+
"transformer.h.14.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
82 |
+
"transformer.h.14.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
83 |
+
"transformer.h.15.ln.bias": "model-00001-of-00002.safetensors",
|
84 |
+
"transformer.h.15.ln.weight": "model-00001-of-00002.safetensors",
|
85 |
+
"transformer.h.15.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
86 |
+
"transformer.h.15.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
87 |
+
"transformer.h.15.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
88 |
+
"transformer.h.15.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
89 |
+
"transformer.h.15.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
90 |
+
"transformer.h.15.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
91 |
+
"transformer.h.15.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
92 |
+
"transformer.h.15.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
93 |
+
"transformer.h.16.ln.bias": "model-00001-of-00002.safetensors",
|
94 |
+
"transformer.h.16.ln.weight": "model-00001-of-00002.safetensors",
|
95 |
+
"transformer.h.16.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
96 |
+
"transformer.h.16.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
97 |
+
"transformer.h.16.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
98 |
+
"transformer.h.16.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
99 |
+
"transformer.h.16.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
100 |
+
"transformer.h.16.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
101 |
+
"transformer.h.16.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
102 |
+
"transformer.h.16.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
103 |
+
"transformer.h.17.ln.bias": "model-00001-of-00002.safetensors",
|
104 |
+
"transformer.h.17.ln.weight": "model-00001-of-00002.safetensors",
|
105 |
+
"transformer.h.17.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
106 |
+
"transformer.h.17.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
107 |
+
"transformer.h.17.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
108 |
+
"transformer.h.17.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
109 |
+
"transformer.h.17.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
110 |
+
"transformer.h.17.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
111 |
+
"transformer.h.17.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
112 |
+
"transformer.h.17.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
113 |
+
"transformer.h.18.ln.bias": "model-00001-of-00002.safetensors",
|
114 |
+
"transformer.h.18.ln.weight": "model-00001-of-00002.safetensors",
|
115 |
+
"transformer.h.18.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
116 |
+
"transformer.h.18.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
117 |
+
"transformer.h.18.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
118 |
+
"transformer.h.18.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
119 |
+
"transformer.h.18.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
120 |
+
"transformer.h.18.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
121 |
+
"transformer.h.18.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
122 |
+
"transformer.h.18.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
123 |
+
"transformer.h.19.ln.bias": "model-00001-of-00002.safetensors",
|
124 |
+
"transformer.h.19.ln.weight": "model-00001-of-00002.safetensors",
|
125 |
+
"transformer.h.19.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
126 |
+
"transformer.h.19.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
127 |
+
"transformer.h.19.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
128 |
+
"transformer.h.19.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
129 |
+
"transformer.h.19.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
130 |
+
"transformer.h.19.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
131 |
+
"transformer.h.19.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
132 |
+
"transformer.h.19.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
133 |
+
"transformer.h.2.ln.bias": "model-00001-of-00002.safetensors",
|
134 |
+
"transformer.h.2.ln.weight": "model-00001-of-00002.safetensors",
|
135 |
+
"transformer.h.2.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
136 |
+
"transformer.h.2.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
137 |
+
"transformer.h.2.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
138 |
+
"transformer.h.2.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
139 |
+
"transformer.h.2.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
140 |
+
"transformer.h.2.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
141 |
+
"transformer.h.2.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
142 |
+
"transformer.h.2.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
143 |
+
"transformer.h.20.ln.bias": "model-00001-of-00002.safetensors",
|
144 |
+
"transformer.h.20.ln.weight": "model-00001-of-00002.safetensors",
|
145 |
+
"transformer.h.20.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
146 |
+
"transformer.h.20.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
147 |
+
"transformer.h.20.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
148 |
+
"transformer.h.20.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
149 |
+
"transformer.h.20.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
150 |
+
"transformer.h.20.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
151 |
+
"transformer.h.20.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
152 |
+
"transformer.h.20.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
153 |
+
"transformer.h.21.ln.bias": "model-00001-of-00002.safetensors",
|
154 |
+
"transformer.h.21.ln.weight": "model-00001-of-00002.safetensors",
|
155 |
+
"transformer.h.21.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
156 |
+
"transformer.h.21.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
157 |
+
"transformer.h.21.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
158 |
+
"transformer.h.21.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
159 |
+
"transformer.h.21.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
160 |
+
"transformer.h.21.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
161 |
+
"transformer.h.21.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
162 |
+
"transformer.h.21.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
163 |
+
"transformer.h.22.ln.bias": "model-00001-of-00002.safetensors",
|
164 |
+
"transformer.h.22.ln.weight": "model-00001-of-00002.safetensors",
|
165 |
+
"transformer.h.22.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
166 |
+
"transformer.h.22.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
167 |
+
"transformer.h.22.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
168 |
+
"transformer.h.22.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
169 |
+
"transformer.h.22.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
170 |
+
"transformer.h.22.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
171 |
+
"transformer.h.22.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
172 |
+
"transformer.h.22.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
173 |
+
"transformer.h.23.ln.bias": "model-00001-of-00002.safetensors",
|
174 |
+
"transformer.h.23.ln.weight": "model-00001-of-00002.safetensors",
|
175 |
+
"transformer.h.23.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
176 |
+
"transformer.h.23.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
177 |
+
"transformer.h.23.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
178 |
+
"transformer.h.23.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
179 |
+
"transformer.h.23.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
180 |
+
"transformer.h.23.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
181 |
+
"transformer.h.23.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
182 |
+
"transformer.h.23.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
183 |
+
"transformer.h.24.ln.bias": "model-00001-of-00002.safetensors",
|
184 |
+
"transformer.h.24.ln.weight": "model-00001-of-00002.safetensors",
|
185 |
+
"transformer.h.24.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
186 |
+
"transformer.h.24.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
187 |
+
"transformer.h.24.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
188 |
+
"transformer.h.24.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
189 |
+
"transformer.h.24.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
190 |
+
"transformer.h.24.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
191 |
+
"transformer.h.24.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
192 |
+
"transformer.h.24.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
193 |
+
"transformer.h.25.ln.bias": "model-00001-of-00002.safetensors",
|
194 |
+
"transformer.h.25.ln.weight": "model-00001-of-00002.safetensors",
|
195 |
+
"transformer.h.25.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
196 |
+
"transformer.h.25.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
197 |
+
"transformer.h.25.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
198 |
+
"transformer.h.25.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
199 |
+
"transformer.h.25.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
200 |
+
"transformer.h.25.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
201 |
+
"transformer.h.25.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
202 |
+
"transformer.h.25.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
203 |
+
"transformer.h.26.ln.bias": "model-00001-of-00002.safetensors",
|
204 |
+
"transformer.h.26.ln.weight": "model-00001-of-00002.safetensors",
|
205 |
+
"transformer.h.26.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
206 |
+
"transformer.h.26.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
207 |
+
"transformer.h.26.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
208 |
+
"transformer.h.26.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
209 |
+
"transformer.h.26.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
210 |
+
"transformer.h.26.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
211 |
+
"transformer.h.26.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
212 |
+
"transformer.h.26.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
213 |
+
"transformer.h.27.ln.bias": "model-00001-of-00002.safetensors",
|
214 |
+
"transformer.h.27.ln.weight": "model-00001-of-00002.safetensors",
|
215 |
+
"transformer.h.27.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
216 |
+
"transformer.h.27.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
217 |
+
"transformer.h.27.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
218 |
+
"transformer.h.27.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
219 |
+
"transformer.h.27.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
220 |
+
"transformer.h.27.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
221 |
+
"transformer.h.27.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
222 |
+
"transformer.h.27.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
223 |
+
"transformer.h.28.ln.bias": "model-00001-of-00002.safetensors",
|
224 |
+
"transformer.h.28.ln.weight": "model-00001-of-00002.safetensors",
|
225 |
+
"transformer.h.28.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
226 |
+
"transformer.h.28.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
227 |
+
"transformer.h.28.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
228 |
+
"transformer.h.28.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
229 |
+
"transformer.h.28.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
230 |
+
"transformer.h.28.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
231 |
+
"transformer.h.28.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
232 |
+
"transformer.h.28.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
233 |
+
"transformer.h.29.ln.bias": "model-00001-of-00002.safetensors",
|
234 |
+
"transformer.h.29.ln.weight": "model-00001-of-00002.safetensors",
|
235 |
+
"transformer.h.29.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
236 |
+
"transformer.h.29.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
237 |
+
"transformer.h.29.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
238 |
+
"transformer.h.29.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
239 |
+
"transformer.h.29.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
240 |
+
"transformer.h.29.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
241 |
+
"transformer.h.29.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
242 |
+
"transformer.h.29.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
243 |
+
"transformer.h.3.ln.bias": "model-00001-of-00002.safetensors",
|
244 |
+
"transformer.h.3.ln.weight": "model-00001-of-00002.safetensors",
|
245 |
+
"transformer.h.3.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
246 |
+
"transformer.h.3.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
247 |
+
"transformer.h.3.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
248 |
+
"transformer.h.3.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
249 |
+
"transformer.h.3.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
250 |
+
"transformer.h.3.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
251 |
+
"transformer.h.3.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
252 |
+
"transformer.h.3.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
253 |
+
"transformer.h.30.ln.bias": "model-00001-of-00002.safetensors",
|
254 |
+
"transformer.h.30.ln.weight": "model-00001-of-00002.safetensors",
|
255 |
+
"transformer.h.30.mixer.Wqkv.bias": "model-00002-of-00002.safetensors",
|
256 |
+
"transformer.h.30.mixer.Wqkv.weight": "model-00002-of-00002.safetensors",
|
257 |
+
"transformer.h.30.mixer.out_proj.bias": "model-00002-of-00002.safetensors",
|
258 |
+
"transformer.h.30.mixer.out_proj.weight": "model-00002-of-00002.safetensors",
|
259 |
+
"transformer.h.30.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
260 |
+
"transformer.h.30.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
261 |
+
"transformer.h.30.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
262 |
+
"transformer.h.30.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
263 |
+
"transformer.h.31.ln.bias": "model-00002-of-00002.safetensors",
|
264 |
+
"transformer.h.31.ln.weight": "model-00002-of-00002.safetensors",
|
265 |
+
"transformer.h.31.mixer.Wqkv.bias": "model-00002-of-00002.safetensors",
|
266 |
+
"transformer.h.31.mixer.Wqkv.weight": "model-00002-of-00002.safetensors",
|
267 |
+
"transformer.h.31.mixer.out_proj.bias": "model-00002-of-00002.safetensors",
|
268 |
+
"transformer.h.31.mixer.out_proj.weight": "model-00002-of-00002.safetensors",
|
269 |
+
"transformer.h.31.mlp.fc1.bias": "model-00002-of-00002.safetensors",
|
270 |
+
"transformer.h.31.mlp.fc1.weight": "model-00002-of-00002.safetensors",
|
271 |
+
"transformer.h.31.mlp.fc2.bias": "model-00002-of-00002.safetensors",
|
272 |
+
"transformer.h.31.mlp.fc2.weight": "model-00002-of-00002.safetensors",
|
273 |
+
"transformer.h.4.ln.bias": "model-00001-of-00002.safetensors",
|
274 |
+
"transformer.h.4.ln.weight": "model-00001-of-00002.safetensors",
|
275 |
+
"transformer.h.4.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
276 |
+
"transformer.h.4.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
277 |
+
"transformer.h.4.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
278 |
+
"transformer.h.4.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
279 |
+
"transformer.h.4.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
280 |
+
"transformer.h.4.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
281 |
+
"transformer.h.4.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
282 |
+
"transformer.h.4.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
283 |
+
"transformer.h.5.ln.bias": "model-00001-of-00002.safetensors",
|
284 |
+
"transformer.h.5.ln.weight": "model-00001-of-00002.safetensors",
|
285 |
+
"transformer.h.5.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
286 |
+
"transformer.h.5.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
287 |
+
"transformer.h.5.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
288 |
+
"transformer.h.5.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
289 |
+
"transformer.h.5.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
290 |
+
"transformer.h.5.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
291 |
+
"transformer.h.5.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
292 |
+
"transformer.h.5.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
293 |
+
"transformer.h.6.ln.bias": "model-00001-of-00002.safetensors",
|
294 |
+
"transformer.h.6.ln.weight": "model-00001-of-00002.safetensors",
|
295 |
+
"transformer.h.6.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
296 |
+
"transformer.h.6.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
297 |
+
"transformer.h.6.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
298 |
+
"transformer.h.6.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
299 |
+
"transformer.h.6.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
300 |
+
"transformer.h.6.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
301 |
+
"transformer.h.6.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
302 |
+
"transformer.h.6.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
303 |
+
"transformer.h.7.ln.bias": "model-00001-of-00002.safetensors",
|
304 |
+
"transformer.h.7.ln.weight": "model-00001-of-00002.safetensors",
|
305 |
+
"transformer.h.7.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
306 |
+
"transformer.h.7.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
307 |
+
"transformer.h.7.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
308 |
+
"transformer.h.7.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
309 |
+
"transformer.h.7.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
310 |
+
"transformer.h.7.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
311 |
+
"transformer.h.7.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
312 |
+
"transformer.h.7.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
313 |
+
"transformer.h.8.ln.bias": "model-00001-of-00002.safetensors",
|
314 |
+
"transformer.h.8.ln.weight": "model-00001-of-00002.safetensors",
|
315 |
+
"transformer.h.8.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
316 |
+
"transformer.h.8.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
317 |
+
"transformer.h.8.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
318 |
+
"transformer.h.8.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
319 |
+
"transformer.h.8.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
320 |
+
"transformer.h.8.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
321 |
+
"transformer.h.8.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
322 |
+
"transformer.h.8.mlp.fc2.weight": "model-00001-of-00002.safetensors",
|
323 |
+
"transformer.h.9.ln.bias": "model-00001-of-00002.safetensors",
|
324 |
+
"transformer.h.9.ln.weight": "model-00001-of-00002.safetensors",
|
325 |
+
"transformer.h.9.mixer.Wqkv.bias": "model-00001-of-00002.safetensors",
|
326 |
+
"transformer.h.9.mixer.Wqkv.weight": "model-00001-of-00002.safetensors",
|
327 |
+
"transformer.h.9.mixer.out_proj.bias": "model-00001-of-00002.safetensors",
|
328 |
+
"transformer.h.9.mixer.out_proj.weight": "model-00001-of-00002.safetensors",
|
329 |
+
"transformer.h.9.mlp.fc1.bias": "model-00001-of-00002.safetensors",
|
330 |
+
"transformer.h.9.mlp.fc1.weight": "model-00001-of-00002.safetensors",
|
331 |
+
"transformer.h.9.mlp.fc2.bias": "model-00001-of-00002.safetensors",
|
332 |
+
"transformer.h.9.mlp.fc2.weight": "model-00001-of-00002.safetensors"
|
333 |
+
}
|
334 |
+
}
|
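Note: the weight map above pairs every tensor name with the shard file that stores it, so a loader can open only the shard it needs. A minimal sketch of reading it, assuming a local clone of this repository (the `checkpoint_dir` path is hypothetical) and the `safetensors` package:

import json

from safetensors.torch import load_file

# Hypothetical local path to a clone of this repository
checkpoint_dir = "dolphin-2_6-phi-2"

# The index tells us which shard holds each tensor
with open(f"{checkpoint_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

name = "transformer.h.4.mlp.fc1.weight"
shard = index["weight_map"][name]  # e.g. "model-00001-of-00002.safetensors"

# Load only that shard and pick the tensor out of it
tensor = load_file(f"{checkpoint_dir}/{shard}")[name]
print(name, tuple(tensor.shape))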
modeling_phi.py
ADDED
@@ -0,0 +1,967 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
#
# Copyright (c) 2022, Tri Dao, [email protected].
# Licensed under the BSD 3-Clause License.

from __future__ import annotations

import math
from dataclasses import dataclass, field
from typing import Any, Dict, Optional, Tuple, Union

import torch
import torch.nn as nn
from einops import rearrange, repeat
from transformers import PretrainedConfig, PreTrainedModel
from transformers.activations import ACT2FN
from transformers.modeling_outputs import CausalLMOutputWithPast

from .configuration_phi import PhiConfig

try:
    from flash_attn.bert_padding import pad_input, unpad_input
    from flash_attn.layers.rotary import RotaryEmbedding as FlashRotaryEmbedding
    from flash_attn.modules.mha import FlashCrossAttention, FlashSelfAttention
    from flash_attn.ops.fused_dense import FusedDense
except:
    pad_input, unpad_input = None, None
    FlashRotaryEmbedding = None
    FlashSelfAttention, FlashCrossAttention = None, None
    FusedDense = None


@dataclass
class InferenceParams:
    """Inference parameters passed to the model to efficiently calculate
    and store context during inference.

    Reference:
        https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/utils/generation.py.

    Args:
        max_seqlen: Maximum sequence length.
        max_batch_size: Maximum batch size.
        seqlen_offset: Sequence length offset.
        batch_size_offset: Batch size offset.
        key_value_memory_dict: Key value memory dictionary.
        lengths_per_sample: Lengths per sample.

    """

    max_seqlen: int = field(metadata={"help": "Maximum sequence length."})

    max_batch_size: int = field(metadata={"help": "Maximum batch size."})

    seqlen_offset: int = field(default=0, metadata={"help": "Sequence length offset."})

    batch_size_offset: int = field(default=0, metadata={"help": "Batch size offset."})

    key_value_memory_dict: Dict[str, Any] = field(
        default_factory=dict, metadata={"help": "Key value memory dictionary."}
    )

    lengths_per_sample: Optional[torch.Tensor] = field(default=None, metadata={"help": "Lengths per sample."})


class Embedding(nn.Module):
    """Token embedding with dropout."""

    def __init__(self, config: PretrainedConfig) -> None:
        super().__init__()

        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)

    def forward(self, input_ids: torch.LongTensor) -> torch.FloatTensor:
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.wte(input_ids)
        hidden_states = self.drop(hidden_states)

        return hidden_states


def _apply_rotary_emb(
    x: torch.FloatTensor,
    cos: torch.FloatTensor,
    sin: torch.FloatTensor,
) -> torch.FloatTensor:
    _, seqlen, _, _ = x.shape
    _, rotary_dim = cos.shape
    rotary_dim *= 2

    x_rot = x[:, :, :, :rotary_dim]
    x_pass = x[:, :, :, rotary_dim:]

    x1, x2 = x_rot.chunk(2, dim=-1)
    c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d")
    x1, x2, c, s = [t.to(dtype=torch.float32) for t in [x1, x2, c, s]]

    x_rot = torch.cat([x1 * c - x2 * s, x1 * s + x2 * c], axis=-1).to(x.dtype)

    return torch.cat([x_rot, x_pass], axis=-1)


def _apply_rotary_emb_kv(
    kv: torch.FloatTensor,
    cos: torch.FloatTensor,
    sin: torch.FloatTensor,
    cos_k: Optional[torch.FloatTensor] = None,
    sin_k: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
    _, seqlen, _, _, _ = kv.shape
    _, rotary_dim = cos.shape
    rotary_dim *= 2

    k_rot = kv[:, :, 0, :, :rotary_dim]
    k_pass = kv[:, :, 0, :, rotary_dim:]

    k1, k2 = k_rot.chunk(2, dim=-1)
    c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d")
    k1, k2, c, s = [t.to(dtype=torch.float32) for t in [k1, k2, c, s]]

    k_rot = torch.cat([k1 * c - k2 * s, k1 * s + k2 * c], axis=-1).to(kv.dtype)

    return torch.cat(
        [
            torch.cat([k_rot, k_pass], axis=-1).unsqueeze(2),
            kv[:, :, 1:2, :, :],
        ],
        axis=2,
    )


def _apply_rotary_emb_qkv(
    qkv: torch.FloatTensor,
    cos: torch.FloatTensor,
    sin: torch.FloatTensor,
    cos_k: Optional[torch.FloatTensor] = None,
    sin_k: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
    _, seqlen, _, _, _ = qkv.shape
    _, rotary_dim = cos.shape
    rotary_dim *= 2

    q_rot = qkv[:, :, 0, :, :rotary_dim]
    q_pass = qkv[:, :, 0, :, rotary_dim:]

    k_rot = qkv[:, :, 1, :, :rotary_dim]
    k_pass = qkv[:, :, 1, :, rotary_dim:]

    q1, q2 = q_rot.chunk(2, dim=-1)
    k1, k2 = k_rot.chunk(2, dim=-1)
    c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d")
    q1, q2, k1, k2, c, s = [t.to(dtype=torch.float32) for t in [q1, q2, k1, k2, c, s]]

    q_rot = torch.cat([q1 * c - q2 * s, q1 * s + q2 * c], axis=-1).to(qkv.dtype)
    k_rot = torch.cat([k1 * c - k2 * s, k1 * s + k2 * c], axis=-1).to(qkv.dtype)

    return torch.cat(
        [
            torch.cat([q_rot, q_pass], axis=-1).unsqueeze(2),
            torch.cat([k_rot, k_pass], axis=-1).unsqueeze(2),
            qkv[:, :, 2:3, :, :],
        ],
        axis=2,
    )


class RotaryEmbedding(nn.Module):
    """Rotary positional embedding (RoPE).

    Reference:
        RoFormer: Enhanced Transformer with Rotary Position Embedding.
        https://arxiv.org/pdf/2104.09864.pdf.

    """

    def __init__(
        self,
        dim: int,
        base: int = 10000,
        scale_base: Optional[float] = None,
        pos_idx_in_fp32: bool = True,
        max_position_embeddings: int = 2048,
        device: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__()

        if scale_base is not None:
            raise NotImplementedError

        self.dim = dim
        self.base = float(base)
        self.scale_base = scale_base
        self.pos_idx_in_fp32 = pos_idx_in_fp32
        self.max_position_embeddings = max_position_embeddings
        self.device = device

        # Generate and save the inverse frequency buffer (non-trainable)
        inv_freq = self._compute_inv_freq(device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Generate and save the scale buffer (non-trainable)
        scale = (
            (torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim) / (1.4 * dim)
            if scale_base is not None
            else None
        )
        self.register_buffer("scale", scale, persistent=False)

        # Initialize cached attributes since ONNX can't rely on dynamic initialization
        self._update_cos_sin_cache(max_position_embeddings, device=device, dtype=torch.float32)

    def _compute_inv_freq(self, device: Optional[str] = None) -> torch.FloatTensor:
        return 1.0 / (self.base ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim))

    def _update_cos_sin_cache(
        self,
        seqlen: int,
        device: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
    ) -> None:
        self._seq_len_cached = seqlen

        # fp32 is preferred since the output of `torch.arange` can be quite large
        # and bf16 would lose a lot of precision
        if self.pos_idx_in_fp32:
            t = torch.arange(seqlen, device=device, dtype=torch.float32)
            if self.inv_freq.dtype != torch.float32:
                inv_freq = self._compute_inv_freq(device=device)
            else:
                inv_freq = self.inv_freq
        else:
            t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
            inv_freq = self.inv_freq

        # `torch.outer` is preferred since `torch.einsum` converts from fp32 to fp16 if used with AMP
        freqs = torch.outer(t, inv_freq)
        if self.scale is None:
            self._cos_cached = torch.cos(freqs).to(dtype)
            self._sin_cached = torch.sin(freqs).to(dtype)
        else:
            power = (
                torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device) - seqlen // 2
            ) / self.scale_base
            scale = self.scale.to(device=power.device) ** rearrange(power, "s -> s 1")

            # Force the scale multiplication to happen in fp32
            self._cos_cached = (torch.cos(freqs) * scale).to(dtype)
            self._sin_cached = (torch.sin(freqs) * scale).to(dtype)
            self._cos_k_cached = (torch.cos(freqs) / scale).to(dtype)
            self._sin_k_cached = (torch.sin(freqs) / scale).to(dtype)

    def forward(
        self,
        qkv: torch.Tensor,
        kv: Optional[torch.Tensor] = None,
        seqlen_offset: int = 0,
        **kwargs,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        if (
            self._seq_len_cached < qkv.shape[1] + seqlen_offset
            or self._cos_cached.device != qkv.device
            or self._cos_cached.dtype != qkv.dtype
            or (self.training and self._cos_cached.is_inference())
        ):
            self._update_cos_sin_cache(qkv.shape[1] + seqlen_offset, device=qkv.device, dtype=qkv.dtype)

        if kv is None:
            return _apply_rotary_emb_qkv(
                qkv,
                self._cos_cached[seqlen_offset:],
                self._sin_cached[seqlen_offset:],
            )
        else:
            q = _apply_rotary_emb(
                qkv,
                self._cos_cached[seqlen_offset:],
                self._sin_cached[seqlen_offset:],
            )
            kv = _apply_rotary_emb_kv(
                kv,
                self._cos_cached[seqlen_offset:],
                self._sin_cached[seqlen_offset:],
            )

            return q, kv


class MLP(nn.Module):
    """Multi-Layer Perceptron.

    Reference:
        Attention Is All You Need.
        https://arxiv.org/pdf/1706.03762.pdf.

    """

    def __init__(
        self,
        config: PretrainedConfig,
        n_inner: Optional[int] = None,
        act_fn: Optional[str] = None,
    ) -> None:
        super().__init__()

        act_fn = config.activation_function if act_fn is None else act_fn

        n_inner = getattr(config, "n_inner", None) if n_inner is None else n_inner
        n_inner = n_inner if n_inner is not None else 4 * config.n_embd

        self.fc1 = nn.Linear(config.n_embd, n_inner)
        self.fc2 = nn.Linear(n_inner, config.n_embd)
        self.act = ACT2FN[act_fn]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc2(hidden_states)

        return hidden_states


class SelfAttention(nn.Module):
    """Self-attention layer (compatible with PyTorch).

    Reference:
        https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/mha.py.

    """

    def __init__(
        self,
        causal: bool = True,
        softmax_scale: Optional[float] = None,
        attention_dropout: float = 0.0,
    ) -> None:
        super().__init__()

        self.causal = causal
        self.softmax_scale = softmax_scale
        self.drop = nn.Dropout(attention_dropout)

    @torch.autocast("cpu", enabled=False)
    @torch.autocast("cuda", enabled=False)
    def forward(
        self,
        qkv: torch.FloatTensor,
        causal: Optional[bool] = None,
        key_padding_mask: Optional[torch.BoolTensor] = None,
        **kwargs,
    ) -> torch.FloatTensor:
        batch_size, seqlen = qkv.shape[0], qkv.shape[1]
        q, k, v = qkv.unbind(dim=2)

        q = q.to(torch.float32)
        k = k.to(torch.float32)

        causal = self.causal if causal is None else causal
        softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])

        # Autocast is manually disabled to avoid `torch.einsum` performing the operation
        # using float16, which might lead to overflow
        scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)

        if key_padding_mask is not None:
            padding_mask = torch.full((batch_size, seqlen), -10000.0, dtype=scores.dtype, device=scores.device)
            padding_mask.masked_fill_(key_padding_mask, 0.0)

            scores = scores + rearrange(padding_mask, "b s -> b 1 1 s")

        if causal:
            causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
            scores = scores + causal_mask.to(dtype=scores.dtype)

        attention = torch.softmax(scores, dim=-1).to(v.dtype)
        attention = self.drop(attention)

        output = torch.einsum("bhts,bshd->bthd", attention, v)

        return output


class CrossAttention(nn.Module):
    """Cross-attention layer (compatible with PyTorch).

    Reference:
        https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/mha.py.

    """

    def __init__(
        self,
        causal: bool = True,
        softmax_scale: Optional[float] = None,
        attention_dropout: float = 0.0,
    ) -> None:
        super().__init__()

        self.causal = causal
        self.softmax_scale = softmax_scale
        self.drop = nn.Dropout(attention_dropout)

    @torch.autocast("cpu", enabled=False)
    @torch.autocast("cuda", enabled=False)
    def forward(
        self,
        q: torch.FloatTensor,
        kv: torch.FloatTensor,
        causal: Optional[bool] = None,
        key_padding_mask: Optional[torch.BoolTensor] = None,
        **kwargs,
    ) -> torch.FloatTensor:
        batch_size, seqlen_q = q.shape[0], q.shape[1]
        seqlen_k = kv.shape[1]

        if kv.shape[3] != q.shape[2]:
            kv = repeat(kv, "... hkv d -> ... (hkv g) d", g=q.shape[2] // kv.shape[3])
        k, v = kv.unbind(dim=2)

        q = q.to(torch.float32)
        k = k.to(torch.float32)

        causal = self.causal if causal is None else causal
        softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])

        # Autocast is manually disabled to avoid `torch.einsum` performing the operation
        # using float16, which might lead to overflow
        scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)

        if key_padding_mask is not None:
            padding_mask = torch.full(
                (batch_size, seqlen_k),
                -10000.0,
                dtype=scores.dtype,
                device=scores.device,
            )
            padding_mask.masked_fill_(key_padding_mask, 0.0)

            scores = scores + rearrange(padding_mask, "b s -> b 1 1 s")

        if causal:
            rows = rearrange(torch.arange(seqlen_q, device=q.device, dtype=torch.long), "s -> s 1")
            cols = torch.arange(seqlen_k, device=k.device, dtype=torch.long)
            causal_mask = cols > rows + seqlen_k - seqlen_q

            scores = scores.masked_fill(causal_mask, -10000.0)

        attention = torch.softmax(scores, dim=-1).to(v.dtype)
        attention = self.drop(attention)

        output = torch.einsum("bhts,bshd->bthd", attention, v)

        return output


def _find_mha_dims(
    config: PretrainedConfig,
    n_head: Optional[int] = None,
    n_head_kv: Optional[int] = None,
    head_dim: Optional[int] = None,
) -> Tuple[int, int, int]:
    if n_head is None and head_dim is None:
        head_dim = config.n_embd // config.n_head
        n_head = config.n_head
    elif n_head is None or head_dim is None:
        raise ValueError("`n_head` and `head_dim` must be both specified or `None`.")

    if n_head_kv is None:
        n_head_kv = getattr(config, "n_head_kv", None) or n_head

    return n_head, n_head_kv, head_dim


def _update_kv_cache(kv: torch.FloatTensor, inference_params: InferenceParams, layer_idx: int) -> torch.FloatTensor:
    num_heads, head_dim = kv.shape[-2:]

    if layer_idx not in inference_params.key_value_memory_dict:
        inference_params.key_value_memory_dict[layer_idx] = torch.empty(
            inference_params.max_batch_size,
            inference_params.max_seqlen,
            2,
            num_heads,
            head_dim,
            dtype=kv.dtype,
            device=kv.device,
        )

    batch_start = inference_params.batch_size_offset
    batch_end = batch_start + kv.shape[0]

    sequence_start = inference_params.seqlen_offset
    sequence_end = sequence_start + kv.shape[1]

    # When the current sequence length is equal to or larger than the maximum sequence length,
    # we need to concatenate the current `kv` with the cached `kv` to expand its length
    if sequence_end >= inference_params.max_seqlen:
        inference_params.key_value_memory_dict[layer_idx] = torch.concatenate(
            (inference_params.key_value_memory_dict[layer_idx], kv), dim=1
        )

    inference_params.key_value_memory_dict[layer_idx][batch_start:batch_end, sequence_start:sequence_end, ...] = kv
    kv = inference_params.key_value_memory_dict[layer_idx][batch_start:batch_end, :sequence_end, ...]

    return kv


class MHA(nn.Module):
    """Multi-head attention layer."""

    def __init__(
        self,
        config: PretrainedConfig,
        dtype: Optional[torch.dtype] = None,
        device: Optional[str] = None,
        rotary_dim: Optional[int] = None,
        rotary_base: float = 10000.0,
        rotary_scale_base: Optional[float] = None,
        n_head: Optional[int] = None,
        n_head_kv: Optional[int] = None,
        head_dim: Optional[int] = None,
        bias: bool = True,
        causal: bool = True,
        softmax_scale: Optional[float] = None,
        layer_idx: Optional[int] = None,
        return_residual: bool = False,
        checkpointing: bool = False,
    ) -> None:
        super().__init__()

        # Rotary embedding
        self.rotary_dim = rotary_dim if rotary_dim is not None else getattr(config, "rotary_dim", 0)
        if self.rotary_dim > 0:
            rotary_cls = FlashRotaryEmbedding if config.flash_rotary else RotaryEmbedding
            if rotary_cls is None:
                rotary_cls = RotaryEmbedding

            rotary_kwargs = {}
            if rotary_cls is RotaryEmbedding:
                rotary_kwargs["max_position_embeddings"] = config.n_positions

            self.rotary_emb = rotary_cls(
                self.rotary_dim,
                base=rotary_base,
                scale_base=rotary_scale_base,
                device=device,
                **rotary_kwargs,
            )

        # Attention head dimensions and QKV/output projections
        self.n_head, self.n_head_kv, self.head_dim = _find_mha_dims(
            config, n_head=n_head, n_head_kv=n_head_kv, head_dim=head_dim
        )
        op_size = self.head_dim * (self.n_head + 2 * self.n_head_kv)
        hidden_size = config.n_embd

        linear_cls = FusedDense if config.fused_dense else nn.Linear
        if linear_cls is None:
            linear_cls = nn.Linear

        self.Wqkv = linear_cls(hidden_size, op_size, bias=bias, device=device, dtype=dtype)
        self.out_proj = linear_cls(hidden_size, hidden_size, bias=bias, device=device, dtype=dtype)

        # Attention
        attn_cls = FlashSelfAttention if config.flash_attn else SelfAttention
        if attn_cls is None:
            attn_cls = SelfAttention

        cross_attn_cls = FlashCrossAttention if config.flash_attn else CrossAttention
        if cross_attn_cls is None:
            cross_attn_cls = CrossAttention

        self.inner_attn = attn_cls(
            causal=causal,
            softmax_scale=softmax_scale,
            attention_dropout=config.attn_pdrop,
        )
        self.inner_cross_attn = cross_attn_cls(
            causal=causal,
            softmax_scale=softmax_scale,
            attention_dropout=config.attn_pdrop,
        )

        self.flash_attn = config.flash_attn and attn_cls is FlashSelfAttention
        self.layer_idx = layer_idx
        self.return_residual = return_residual
        self.checkpointing = checkpointing

    def _forward_self_attn(
        self, x: torch.FloatTensor, key_padding_mask: Optional[torch.BoolTensor]
    ) -> torch.FloatTensor:
        qkv = self.Wqkv(x)
        qkv = rearrange(qkv, "... (three h d) -> ... three h d", three=3, d=self.head_dim)

        if self.rotary_dim > 0:
            qkv = self.rotary_emb(qkv)

        if self.flash_attn:
            batch_size, seqlen = qkv.shape[0], qkv.shape[1]

            cu_seqlens, max_seqlen = None, None
            if key_padding_mask is not None:
                # If `key_padding_mask` is supplied, we need to unpad the input and retrieve
                # the `cu_seqlens` and `max_seqlen` to be used by `flash-attn`
                qkv, indices, cu_seqlens, max_seqlen = unpad_input(qkv, key_padding_mask)

            if self.checkpointing and self.training:
                attn_output = torch.utils.checkpoint.checkpoint(
                    self.inner_attn, qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen
                )
            else:
                attn_output = self.inner_attn(qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen).to(qkv.device)

            # If `key_padding_mask` is supplied, we need to pad the output back to the original shape
            return pad_input(attn_output, indices, batch_size, seqlen) if key_padding_mask is not None else attn_output

        if self.checkpointing and self.training:
            return torch.utils.checkpoint.checkpoint(
                self.inner_attn, qkv, key_padding_mask=key_padding_mask, use_reentrant=False
            )

        return self.inner_attn(qkv, key_padding_mask=key_padding_mask)

    def _forward_cross_attn(
        self,
        x: torch.FloatTensor,
        past_key_values: Optional[InferenceParams],
        key_padding_mask: Optional[torch.BoolTensor],
    ) -> torch.FloatTensor:
        batch_size = x.shape[0]

        qkv = self.Wqkv(x)

        q = qkv[..., : self.n_head * self.head_dim]
        q = rearrange(q, "... (h d) -> ... h d", d=self.head_dim)

        kv = qkv[..., self.n_head * self.head_dim :]
        kv = rearrange(kv, "... (two hkv d) -> ... two hkv d", two=2, d=self.head_dim)

        seqlen_offset = past_key_values.seqlen_offset if past_key_values is not None else 0
        causal = None if seqlen_offset == 0 else False
        if self.rotary_dim > 0:
            q, kv = self.rotary_emb(q, kv=kv, seqlen_offset=seqlen_offset)

        if past_key_values is not None:
            kv = _update_kv_cache(kv, past_key_values, self.layer_idx)

        if self.flash_attn:
            batch_size, seqlen_q = q.shape[0], q.shape[1]
            seqlen_k = kv.shape[1]

            cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k = (
                None,
                None,
                None,
                None,
            )
            if key_padding_mask is not None:
                kv, _, cu_seqlens_k, max_seqlen_k = unpad_input(kv, key_padding_mask)

                if seqlen_q == 1:
                    key_padding_mask = torch.ones(batch_size, 1, device=q.device)
                elif seqlen_q != seqlen_k:
                    key_padding_mask = key_padding_mask[:, -seqlen_q:]

                q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(q, key_padding_mask)

            if self.checkpointing and self.training:
                attn_output = torch.utils.checkpoint.checkpoint(
                    self.inner_cross_attn,
                    q,
                    kv,
                    causal=causal,
                    cu_seqlens=cu_seqlens_q,
                    max_seqlen=max_seqlen_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_k=max_seqlen_k,
                    use_reentrant=False,
                )
            else:
                attn_output = self.inner_cross_attn(
                    q,
                    kv,
                    causal=causal,
                    cu_seqlens=cu_seqlens_q,
                    max_seqlen=max_seqlen_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_k=max_seqlen_k,
                )

            return (
                pad_input(attn_output, indices_q, batch_size, max_seqlen_q)
                if key_padding_mask is not None
                else attn_output
            )

        if self.checkpointing and self.training:
            return torch.utils.checkpoint.checkpoint(
                self.inner_cross_attn,
                q,
                kv,
                key_padding_mask=key_padding_mask,
                causal=causal,
                use_reentrant=False,
            )

        return self.inner_cross_attn(q, kv, key_padding_mask=key_padding_mask, causal=causal)

    def forward(
        self,
        x: torch.FloatTensor,
        past_key_values: Optional[InferenceParams] = None,
        attention_mask: Optional[Union[torch.LongTensor, torch.BoolTensor]] = None,
        **kwargs,
    ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, torch.FloatTensor]]:
        if attention_mask is not None:
            attention_mask = attention_mask.bool()

        # MHA
        if self.n_head == self.n_head_kv:
            if past_key_values is None:
                # If `past_key_values` are not supplied, we run self-attention
                attn_output = self._forward_self_attn(x, attention_mask)
            else:
                # If `past_key_values` are supplied, it means that we might have cached values and
                # could take advantage of cross-attention
                attn_output = self._forward_cross_attn(x, past_key_values, attention_mask)
        # MQA / GQA
        else:
            # Regardless of whether `past_key_values` are supplied, cross-attention is always used
            # because `q` and `kv` lengths might be different
            attn_output = self._forward_cross_attn(x, past_key_values, attention_mask)

        output = rearrange(attn_output, "... h d -> ... (h d)")
        output = self.out_proj(output)

        return output if not self.return_residual else (output, x)


class ParallelBlock(nn.Module):
    """Parallel block.

    This block applies parallel mixer and MLP layers to the input (used in GPT-J and CodeGen).

    """

    def __init__(
        self,
        config: PretrainedConfig,
        block_idx: Optional[int] = None,
    ) -> None:
        super().__init__()

        self.ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.block_idx = block_idx

        self.mixer = MHA(config, layer_idx=block_idx)
        self.mlp = MLP(config)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        **kwargs,
    ) -> torch.FloatTensor:
        residual = hidden_states
        hidden_states = self.ln(hidden_states)

        attn_outputs = self.mixer(
            hidden_states,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
        )
        if isinstance(attn_outputs, tuple):
            attn_outputs = attn_outputs[0]

        attn_outputs = self.resid_dropout(attn_outputs)
        feed_forward_hidden_states = self.resid_dropout(self.mlp(hidden_states))

        hidden_states = attn_outputs + feed_forward_hidden_states + residual

        return hidden_states


class CausalLMHead(nn.Module):
    """Causal Language Modeling head.

    Reference:
        Improving Language Understanding by Generative Pre-Training.
        https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf.

    """

    def __init__(self, config: PretrainedConfig) -> None:
        super().__init__()

        self.ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.linear = nn.Linear(config.n_embd, config.vocab_size)

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states = self.ln(hidden_states)
        logits = self.linear(hidden_states).to(torch.float32)

        return logits


class CausalLMLoss(nn.Module):
    """Causal Language Modeling loss.

    Reference:
        Improving Language Understanding by Generative Pre-Training.
        https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf.

    """

    def __init__(self, shift_labels: bool = True) -> None:
        super().__init__()

        self.shift_labels = shift_labels
        self.loss_fct = nn.CrossEntropyLoss()

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor) -> torch.FloatTensor:
        if self.shift_labels:
            logits = logits[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()

        loss = self.loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))

        return loss


class PhiPreTrainedModel(PreTrainedModel):
    """Phi pre-trained model."""

    config_class = PhiConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    _no_split_modules = ["ParallelBlock"]

    def __init__(self, *inputs, **kwargs) -> None:
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module: nn.Module) -> None:
        if isinstance(module, (nn.Linear,)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            if module.bias is not None:
                module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, MHA):
            module.checkpointing = value

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
        attention_mask: Optional[Union[torch.LongTensor, torch.BoolTensor]] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        if past_key_values is None or not isinstance(past_key_values, InferenceParams):
            past_key_values = InferenceParams(
                max_seqlen=self.config.n_positions,
                max_batch_size=input_ids.shape[0],
                seqlen_offset=0,
                batch_size_offset=0,
                key_value_memory_dict={},
                lengths_per_sample=None,
            )
        else:
            # Assume that `past_key_values` has cached all tokens up to the last token in `input_ids`
            past_key_values.seqlen_offset = input_ids.shape[1] - 1
            input_ids = input_ids[:, -1].unsqueeze(-1)

        return {
            "input_ids": input_ids,
            "past_key_values": past_key_values,
            "attention_mask": attention_mask,
        }


class PhiModel(PhiPreTrainedModel):
    """Phi model."""

    _keys_to_ignore_on_load_missing = [""]
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.mlp.(fc_in|fc_out)\.(weight|bias)"]

    def __init__(self, config: PhiConfig) -> None:
        super().__init__(config)

        self.embd = Embedding(config)
        self.h = nn.ModuleList([ParallelBlock(config, block_idx=i) for i in range(config.n_layer)])
        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self) -> nn.Embedding:
        return self.embd.wte

    def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None:
        self.embd.wte = new_embeddings

    def forward(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
    ) -> torch.FloatTensor:
        hidden_states = self.embd(input_ids)

        for layer in self.h:
            hidden_states = layer(
                hidden_states,
                past_key_values=past_key_values,
                attention_mask=attention_mask,
            )

        return hidden_states


class PhiForCausalLM(PhiPreTrainedModel):
    """Phi for Causal Language Modeling."""

    _keys_to_ignore_on_load_missing = [""]
    _keys_to_ignore_on_load_unexpected = [r"transformer\.h\.\d+\.mlp.(fc_in|fc_out)\.(weight|bias)"]

    def __init__(self, config: PhiConfig) -> None:
        super().__init__(config)

        self.transformer = PhiModel(config)
        self.lm_head = CausalLMHead(config)
        self.loss = CausalLMLoss()

        self.post_init()

    def get_output_embeddings(self) -> nn.Linear:
        return self.lm_head.linear

    def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
        self.lm_head.linear = new_embeddings

    def forward(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        hidden_states = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask)
        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            loss = self.loss(lm_logits, labels)

        return CausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=past_key_values)
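Note: a minimal usage sketch for the classes above, assuming the repository has been cloned locally (the `checkpoint_dir` path is hypothetical). `trust_remote_code=True` is needed because `modeling_phi.py` and `configuration_phi.py` ship with the checkpoint rather than with `transformers`:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical local path to a clone of this repository
checkpoint_dir = "dolphin-2_6-phi-2"

tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    checkpoint_dir,
    torch_dtype=torch.float16,
    trust_remote_code=True,  # loads the repo's modeling_phi.py instead of library code
)

# `generate` works because PhiForCausalLM implements `prepare_inputs_for_generation`,
# which wraps the KV cache in the InferenceParams dataclass defined above
inputs = tokenizer("Hello, my name is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0]))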
pytorch_model-00001-of-00002.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9f072579ad2fb4bd697bbbcc41528a922bfb5bbb30da68da549d486ac094f81
size 4982535608
pytorch_model-00002-of-00002.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8f5051774cf8662856e8be12e3af76d616a40ab2929ddcfa935169ceb3cfdf07
size 583820991
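Note: the two `.bin` entries above are Git LFS pointer files, not the weights themselves: `oid` records the SHA-256 digest of the real payload and `size` its byte length. A minimal sketch of verifying a downloaded shard against its pointer (the local file path is hypothetical):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks so multi-GB shards need not fit in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local path to the downloaded shard; compare against the pointer's oid
digest = sha256_of("pytorch_model-00002-of-00002.bin")
assert digest == "8f5051774cf8662856e8be12e3af76d616a40ab2929ddcfa935169ceb3cfdf07"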
pytorch_model.bin.index.json
ADDED
@@ -0,0 +1,334 @@
1 |
+
{
|
2 |
+
"metadata": {
|
3 |
+
"total_size": 5566248960
|
4 |
+
},
|
5 |
+
"weight_map": {
|
6 |
+
"lm_head.linear.bias": "pytorch_model-00002-of-00002.bin",
|
7 |
+
"lm_head.linear.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
|
8 |
+
"lm_head.linear.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
|
9 |
+
"lm_head.linear.weight": "pytorch_model-00002-of-00002.bin",
|
10 |
+
"lm_head.ln.bias": "pytorch_model-00002-of-00002.bin",
|
11 |
+
"lm_head.ln.weight": "pytorch_model-00002-of-00002.bin",
|
12 |
+
"transformer.embd.wte.weight": "pytorch_model-00001-of-00002.bin",
|
13 |
+
"transformer.h.0.ln.bias": "pytorch_model-00001-of-00002.bin",
|
14 |
+
"transformer.h.0.ln.weight": "pytorch_model-00001-of-00002.bin",
|
15 |
+
"transformer.h.0.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
16 |
+
"transformer.h.0.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
17 |
+
"transformer.h.0.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
18 |
+
"transformer.h.0.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
19 |
+
"transformer.h.0.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
20 |
+
"transformer.h.0.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
21 |
+
"transformer.h.0.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
22 |
+
"transformer.h.0.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
23 |
+
"transformer.h.1.ln.bias": "pytorch_model-00001-of-00002.bin",
|
24 |
+
"transformer.h.1.ln.weight": "pytorch_model-00001-of-00002.bin",
|
25 |
+
"transformer.h.1.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
26 |
+
"transformer.h.1.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
27 |
+
"transformer.h.1.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
28 |
+
"transformer.h.1.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
29 |
+
"transformer.h.1.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
30 |
+
"transformer.h.1.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
31 |
+
"transformer.h.1.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
32 |
+
"transformer.h.1.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
33 |
+
"transformer.h.10.ln.bias": "pytorch_model-00001-of-00002.bin",
|
34 |
+
"transformer.h.10.ln.weight": "pytorch_model-00001-of-00002.bin",
|
35 |
+
"transformer.h.10.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
36 |
+
"transformer.h.10.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
37 |
+
"transformer.h.10.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
38 |
+
"transformer.h.10.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
39 |
+
"transformer.h.10.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
40 |
+
"transformer.h.10.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
41 |
+
"transformer.h.10.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
42 |
+
"transformer.h.10.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
43 |
+
"transformer.h.11.ln.bias": "pytorch_model-00001-of-00002.bin",
|
44 |
+
"transformer.h.11.ln.weight": "pytorch_model-00001-of-00002.bin",
|
45 |
+
"transformer.h.11.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
46 |
+
"transformer.h.11.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
47 |
+
"transformer.h.11.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
48 |
+
"transformer.h.11.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
49 |
+
"transformer.h.11.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
50 |
+
"transformer.h.11.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
51 |
+
"transformer.h.11.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
52 |
+
"transformer.h.11.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
53 |
+
"transformer.h.12.ln.bias": "pytorch_model-00001-of-00002.bin",
|
54 |
+
"transformer.h.12.ln.weight": "pytorch_model-00001-of-00002.bin",
|
55 |
+
"transformer.h.12.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
56 |
+
"transformer.h.12.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
57 |
+
"transformer.h.12.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
58 |
+
"transformer.h.12.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
59 |
+
"transformer.h.12.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
60 |
+
"transformer.h.12.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
61 |
+
"transformer.h.12.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
62 |
+
"transformer.h.12.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
63 |
+
"transformer.h.13.ln.bias": "pytorch_model-00001-of-00002.bin",
|
64 |
+
"transformer.h.13.ln.weight": "pytorch_model-00001-of-00002.bin",
|
65 |
+
"transformer.h.13.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
66 |
+
"transformer.h.13.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
67 |
+
"transformer.h.13.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
68 |
+
"transformer.h.13.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
69 |
+
"transformer.h.13.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
70 |
+
"transformer.h.13.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
71 |
+
"transformer.h.13.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
72 |
+
"transformer.h.13.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
73 |
+
"transformer.h.14.ln.bias": "pytorch_model-00001-of-00002.bin",
|
74 |
+
"transformer.h.14.ln.weight": "pytorch_model-00001-of-00002.bin",
|
75 |
+
"transformer.h.14.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
76 |
+
"transformer.h.14.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
77 |
+
"transformer.h.14.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
78 |
+
"transformer.h.14.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
79 |
+
"transformer.h.14.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
80 |
+
"transformer.h.14.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
81 |
+
"transformer.h.14.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
82 |
+
"transformer.h.14.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
83 |
+
"transformer.h.15.ln.bias": "pytorch_model-00001-of-00002.bin",
|
84 |
+
"transformer.h.15.ln.weight": "pytorch_model-00001-of-00002.bin",
|
85 |
+
"transformer.h.15.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
86 |
+
"transformer.h.15.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
87 |
+
"transformer.h.15.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
88 |
+
"transformer.h.15.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
89 |
+
"transformer.h.15.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
90 |
+
"transformer.h.15.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
91 |
+
"transformer.h.15.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
92 |
+
"transformer.h.15.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
93 |
+
"transformer.h.16.ln.bias": "pytorch_model-00001-of-00002.bin",
|
94 |
+
"transformer.h.16.ln.weight": "pytorch_model-00001-of-00002.bin",
|
95 |
+
"transformer.h.16.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
96 |
+
"transformer.h.16.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
97 |
+
"transformer.h.16.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
98 |
+
"transformer.h.16.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
99 |
+
"transformer.h.16.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
100 |
+
"transformer.h.16.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
101 |
+
"transformer.h.16.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
102 |
+
"transformer.h.16.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
103 |
+
"transformer.h.17.ln.bias": "pytorch_model-00001-of-00002.bin",
|
104 |
+
"transformer.h.17.ln.weight": "pytorch_model-00001-of-00002.bin",
|
105 |
+
"transformer.h.17.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
106 |
+
"transformer.h.17.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
107 |
+
"transformer.h.17.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
108 |
+
"transformer.h.17.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
109 |
+
"transformer.h.17.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
110 |
+
"transformer.h.17.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
111 |
+
"transformer.h.17.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
112 |
+
"transformer.h.17.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
113 |
+
"transformer.h.18.ln.bias": "pytorch_model-00001-of-00002.bin",
|
114 |
+
"transformer.h.18.ln.weight": "pytorch_model-00001-of-00002.bin",
|
115 |
+
"transformer.h.18.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
116 |
+
"transformer.h.18.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
117 |
+
"transformer.h.18.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
118 |
+
"transformer.h.18.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
119 |
+
"transformer.h.18.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
120 |
+
"transformer.h.18.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
121 |
+
"transformer.h.18.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
122 |
+
"transformer.h.18.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
123 |
+
"transformer.h.19.ln.bias": "pytorch_model-00001-of-00002.bin",
|
124 |
+
"transformer.h.19.ln.weight": "pytorch_model-00001-of-00002.bin",
|
125 |
+
"transformer.h.19.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
126 |
+
"transformer.h.19.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
127 |
+
"transformer.h.19.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
128 |
+
"transformer.h.19.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
129 |
+
"transformer.h.19.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
130 |
+
"transformer.h.19.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
131 |
+
"transformer.h.19.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
132 |
+
"transformer.h.19.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
133 |
+
"transformer.h.2.ln.bias": "pytorch_model-00001-of-00002.bin",
|
134 |
+
"transformer.h.2.ln.weight": "pytorch_model-00001-of-00002.bin",
|
135 |
+
"transformer.h.2.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
136 |
+
"transformer.h.2.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
137 |
+
"transformer.h.2.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
138 |
+
"transformer.h.2.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
139 |
+
"transformer.h.2.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
140 |
+
"transformer.h.2.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
141 |
+
"transformer.h.2.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
142 |
+
"transformer.h.2.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
143 |
+
"transformer.h.20.ln.bias": "pytorch_model-00001-of-00002.bin",
|
144 |
+
"transformer.h.20.ln.weight": "pytorch_model-00001-of-00002.bin",
|
145 |
+
"transformer.h.20.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
146 |
+
"transformer.h.20.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
147 |
+
"transformer.h.20.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
148 |
+
"transformer.h.20.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
149 |
+
"transformer.h.20.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
150 |
+
"transformer.h.20.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
151 |
+
"transformer.h.20.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
152 |
+
"transformer.h.20.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
153 |
+
"transformer.h.21.ln.bias": "pytorch_model-00001-of-00002.bin",
|
154 |
+
"transformer.h.21.ln.weight": "pytorch_model-00001-of-00002.bin",
|
155 |
+
"transformer.h.21.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
156 |
+
"transformer.h.21.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
157 |
+
"transformer.h.21.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
158 |
+
"transformer.h.21.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
159 |
+
"transformer.h.21.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
160 |
+
"transformer.h.21.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
161 |
+
"transformer.h.21.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
162 |
+
"transformer.h.21.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
163 |
+
"transformer.h.22.ln.bias": "pytorch_model-00001-of-00002.bin",
|
164 |
+
"transformer.h.22.ln.weight": "pytorch_model-00001-of-00002.bin",
|
165 |
+
"transformer.h.22.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
166 |
+
"transformer.h.22.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
167 |
+
"transformer.h.22.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
168 |
+
"transformer.h.22.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
169 |
+
"transformer.h.22.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
170 |
+
"transformer.h.22.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
171 |
+
"transformer.h.22.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
172 |
+
"transformer.h.22.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
173 |
+
"transformer.h.23.ln.bias": "pytorch_model-00001-of-00002.bin",
|
174 |
+
"transformer.h.23.ln.weight": "pytorch_model-00001-of-00002.bin",
|
175 |
+
"transformer.h.23.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
176 |
+
"transformer.h.23.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
177 |
+
"transformer.h.23.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
178 |
+
"transformer.h.23.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
179 |
+
"transformer.h.23.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
180 |
+
"transformer.h.23.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
181 |
+
"transformer.h.23.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
182 |
+
"transformer.h.23.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
183 |
+
"transformer.h.24.ln.bias": "pytorch_model-00001-of-00002.bin",
|
184 |
+
"transformer.h.24.ln.weight": "pytorch_model-00001-of-00002.bin",
|
185 |
+
"transformer.h.24.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
186 |
+
"transformer.h.24.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
187 |
+
"transformer.h.24.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
188 |
+
"transformer.h.24.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
189 |
+
"transformer.h.24.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
190 |
+
"transformer.h.24.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
191 |
+
"transformer.h.24.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
192 |
+
"transformer.h.24.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
193 |
+
"transformer.h.25.ln.bias": "pytorch_model-00001-of-00002.bin",
|
194 |
+
"transformer.h.25.ln.weight": "pytorch_model-00001-of-00002.bin",
|
195 |
+
"transformer.h.25.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
196 |
+
"transformer.h.25.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
197 |
+
"transformer.h.25.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
198 |
+
"transformer.h.25.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
199 |
+
"transformer.h.25.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
200 |
+
"transformer.h.25.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
201 |
+
"transformer.h.25.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
202 |
+
"transformer.h.25.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
203 |
+
"transformer.h.26.ln.bias": "pytorch_model-00001-of-00002.bin",
|
204 |
+
"transformer.h.26.ln.weight": "pytorch_model-00001-of-00002.bin",
|
205 |
+
"transformer.h.26.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
206 |
+
"transformer.h.26.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
207 |
+
"transformer.h.26.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
208 |
+
"transformer.h.26.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
209 |
+
"transformer.h.26.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
210 |
+
"transformer.h.26.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
211 |
+
"transformer.h.26.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
212 |
+
"transformer.h.26.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
213 |
+
"transformer.h.27.ln.bias": "pytorch_model-00001-of-00002.bin",
|
214 |
+
"transformer.h.27.ln.weight": "pytorch_model-00001-of-00002.bin",
|
215 |
+
"transformer.h.27.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
216 |
+
"transformer.h.27.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
217 |
+
"transformer.h.27.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
218 |
+
"transformer.h.27.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
219 |
+
"transformer.h.27.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
220 |
+
"transformer.h.27.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
221 |
+
"transformer.h.27.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
222 |
+
"transformer.h.27.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
223 |
+
"transformer.h.28.ln.bias": "pytorch_model-00001-of-00002.bin",
|
224 |
+
"transformer.h.28.ln.weight": "pytorch_model-00001-of-00002.bin",
|
225 |
+
"transformer.h.28.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
226 |
+
"transformer.h.28.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
227 |
+
"transformer.h.28.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
228 |
+
"transformer.h.28.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
229 |
+
"transformer.h.28.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
230 |
+
"transformer.h.28.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
231 |
+
"transformer.h.28.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
232 |
+
"transformer.h.28.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
233 |
+
"transformer.h.29.ln.bias": "pytorch_model-00001-of-00002.bin",
|
234 |
+
"transformer.h.29.ln.weight": "pytorch_model-00001-of-00002.bin",
|
235 |
+
"transformer.h.29.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
236 |
+
"transformer.h.29.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
237 |
+
"transformer.h.29.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
238 |
+
"transformer.h.29.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
239 |
+
"transformer.h.29.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
240 |
+
"transformer.h.29.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
241 |
+
"transformer.h.29.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
242 |
+
"transformer.h.29.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
243 |
+
"transformer.h.3.ln.bias": "pytorch_model-00001-of-00002.bin",
|
244 |
+
"transformer.h.3.ln.weight": "pytorch_model-00001-of-00002.bin",
|
245 |
+
"transformer.h.3.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
246 |
+
"transformer.h.3.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
247 |
+
"transformer.h.3.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
248 |
+
"transformer.h.3.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
249 |
+
"transformer.h.3.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
250 |
+
"transformer.h.3.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
251 |
+
"transformer.h.3.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
252 |
+
"transformer.h.3.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
253 |
+
"transformer.h.30.ln.bias": "pytorch_model-00001-of-00002.bin",
|
254 |
+
"transformer.h.30.ln.weight": "pytorch_model-00001-of-00002.bin",
|
255 |
+
"transformer.h.30.mixer.Wqkv.bias": "pytorch_model-00002-of-00002.bin",
|
256 |
+
"transformer.h.30.mixer.Wqkv.weight": "pytorch_model-00002-of-00002.bin",
|
257 |
+
"transformer.h.30.mixer.out_proj.bias": "pytorch_model-00002-of-00002.bin",
|
258 |
+
"transformer.h.30.mixer.out_proj.weight": "pytorch_model-00002-of-00002.bin",
|
259 |
+
"transformer.h.30.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
|
260 |
+
"transformer.h.30.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
|
261 |
+
"transformer.h.30.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
|
262 |
+
"transformer.h.30.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
|
263 |
+
"transformer.h.31.ln.bias": "pytorch_model-00002-of-00002.bin",
|
264 |
+
"transformer.h.31.ln.weight": "pytorch_model-00002-of-00002.bin",
|
265 |
+
"transformer.h.31.mixer.Wqkv.bias": "pytorch_model-00002-of-00002.bin",
|
266 |
+
"transformer.h.31.mixer.Wqkv.weight": "pytorch_model-00002-of-00002.bin",
|
267 |
+
"transformer.h.31.mixer.out_proj.bias": "pytorch_model-00002-of-00002.bin",
|
268 |
+
"transformer.h.31.mixer.out_proj.weight": "pytorch_model-00002-of-00002.bin",
|
269 |
+
"transformer.h.31.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
|
270 |
+
"transformer.h.31.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
|
271 |
+
"transformer.h.31.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
|
272 |
+
"transformer.h.31.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
|
273 |
+
"transformer.h.4.ln.bias": "pytorch_model-00001-of-00002.bin",
|
274 |
+
"transformer.h.4.ln.weight": "pytorch_model-00001-of-00002.bin",
|
275 |
+
"transformer.h.4.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
276 |
+
"transformer.h.4.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
277 |
+
"transformer.h.4.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
278 |
+
"transformer.h.4.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
279 |
+
"transformer.h.4.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
280 |
+
"transformer.h.4.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
281 |
+
"transformer.h.4.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
282 |
+
"transformer.h.4.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
283 |
+
"transformer.h.5.ln.bias": "pytorch_model-00001-of-00002.bin",
|
284 |
+
"transformer.h.5.ln.weight": "pytorch_model-00001-of-00002.bin",
|
285 |
+
"transformer.h.5.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
286 |
+
"transformer.h.5.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
287 |
+
"transformer.h.5.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
288 |
+
"transformer.h.5.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
289 |
+
"transformer.h.5.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
290 |
+
"transformer.h.5.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
291 |
+
"transformer.h.5.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
292 |
+
"transformer.h.5.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
293 |
+
"transformer.h.6.ln.bias": "pytorch_model-00001-of-00002.bin",
|
294 |
+
"transformer.h.6.ln.weight": "pytorch_model-00001-of-00002.bin",
|
295 |
+
"transformer.h.6.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
296 |
+
"transformer.h.6.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
297 |
+
"transformer.h.6.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
298 |
+
"transformer.h.6.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
299 |
+
"transformer.h.6.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
300 |
+
"transformer.h.6.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
301 |
+
"transformer.h.6.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
302 |
+
"transformer.h.6.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
303 |
+
"transformer.h.7.ln.bias": "pytorch_model-00001-of-00002.bin",
|
304 |
+
"transformer.h.7.ln.weight": "pytorch_model-00001-of-00002.bin",
|
305 |
+
"transformer.h.7.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
306 |
+
"transformer.h.7.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
307 |
+
"transformer.h.7.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
308 |
+
"transformer.h.7.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
309 |
+
"transformer.h.7.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
310 |
+
"transformer.h.7.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
311 |
+
"transformer.h.7.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
312 |
+
"transformer.h.7.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
313 |
+
"transformer.h.8.ln.bias": "pytorch_model-00001-of-00002.bin",
|
314 |
+
"transformer.h.8.ln.weight": "pytorch_model-00001-of-00002.bin",
|
315 |
+
"transformer.h.8.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
316 |
+
"transformer.h.8.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
317 |
+
"transformer.h.8.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
318 |
+
"transformer.h.8.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
319 |
+
"transformer.h.8.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
320 |
+
"transformer.h.8.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
321 |
+
"transformer.h.8.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
322 |
+
"transformer.h.8.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin",
|
323 |
+
"transformer.h.9.ln.bias": "pytorch_model-00001-of-00002.bin",
|
324 |
+
"transformer.h.9.ln.weight": "pytorch_model-00001-of-00002.bin",
|
325 |
+
"transformer.h.9.mixer.Wqkv.bias": "pytorch_model-00001-of-00002.bin",
|
326 |
+
"transformer.h.9.mixer.Wqkv.weight": "pytorch_model-00001-of-00002.bin",
|
327 |
+
"transformer.h.9.mixer.out_proj.bias": "pytorch_model-00001-of-00002.bin",
|
328 |
+
"transformer.h.9.mixer.out_proj.weight": "pytorch_model-00001-of-00002.bin",
|
329 |
+
"transformer.h.9.mlp.fc1.bias": "pytorch_model-00001-of-00002.bin",
|
330 |
+
"transformer.h.9.mlp.fc1.weight": "pytorch_model-00001-of-00002.bin",
|
331 |
+
"transformer.h.9.mlp.fc2.bias": "pytorch_model-00001-of-00002.bin",
|
332 |
+
"transformer.h.9.mlp.fc2.weight": "pytorch_model-00001-of-00002.bin"
|
333 |
+
}
|
334 |
+
}
|
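The mapping shows that everything through layer 29 (plus layer 30's layer norm) lives in shard 1, while the rest of layer 30 and all of layer 31 live in shard 2. As a rough illustration of how such an index is consumed, here is a minimal sketch assuming the standard Hugging Face sharded-checkpoint layout, where these entries sit under a top-level "weight_map" key, and local file paths; transformers performs the same resolution internally when from_pretrained finds an index file instead of a single checkpoint:

import json
import torch

# Load the index written alongside the shards; "weight_map" maps each
# parameter name to the shard file that contains it.
with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)
weight_map = index["weight_map"]

# Per the map above, layer 30's attention weights live in shard 2.
name = "transformer.h.30.mixer.Wqkv.weight"
shard_file = weight_map[name]  # "pytorch_model-00002-of-00002.bin"

# Each .bin shard is a plain torch state dict keyed by parameter name.
state_dict = torch.load(shard_file, map_location="cpu")
print(name, tuple(state_dict[name].shape), "from", shard_file)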
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
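Note that eos_token here is <|im_end|> (the ChatML turn terminator) while bos_token, pad_token, and unk_token all reuse <|endoftext|>, so generation stops at the end of an assistant turn. A quick hedged check, assuming the tokenizer is loaded from the duplication source this repo was copied from (the repo id is an assumption; any copy of these tokenizer files behaves the same):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("cognitivecomputations/dolphin-2_6-phi-2")
assert tok.eos_token == "<|im_end|>"
assert tok.bos_token == tok.pad_token == tok.unk_token == "<|endoftext|>"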
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,341 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "50256": {"content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "50257": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50258": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50259": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50260": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50261": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50262": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50263": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50264": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50265": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50266": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50267": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50268": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50269": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50270": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50271": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50272": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50273": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50274": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50275": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50276": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50277": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50278": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50279": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50280": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50281": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50282": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50283": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50284": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50285": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50286": {"content": " ", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50287": {"content": "\t\t\t\t\t\t\t\t\t", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50288": {"content": "\t\t\t\t\t\t\t\t", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50289": {"content": "\t\t\t\t\t\t\t", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50290": {"content": "\t\t\t\t\t\t", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50291": {"content": "\t\t\t\t\t", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50292": {"content": "\t\t\t\t", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50293": {"content": "\t\t\t", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50294": {"content": "\t\t", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": false},
    "50295": {"content": "<|im_end|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true},
    "50296": {"content": "<|im_start|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": false}
  },
  "bos_token": "<|endoftext|>",
  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|im_end|>",
  "model_max_length": 2048,
  "pad_token": "<|endoftext|>",
  "tokenizer_class": "CodeGenTokenizer",
  "unk_token": "<|endoftext|>"
}
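The chat_template above is plain ChatML: each message renders as <|im_start|>{role}, a newline, the content, and <|im_end|>, with an optional trailing <|im_start|>assistant when a generation prompt is requested. A short usage sketch (the repo id is an assumption; apply_chat_template renders the Jinja template stored in this file):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("cognitivecomputations/dolphin-2_6-phi-2")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
# tokenize=False returns the rendered prompt string instead of token ids.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant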
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff