root committed
Commit 6653aa9
Parent(s): 7dedf8a
upload
Browse files
- .gitattributes copy +35 -0
- LICENSE.txt +126 -0
- README.md +323 -0
- USE_POLICY.md +50 -0
- config.json +76 -0
- configs/Llama2-70b-Drop16Attn/config.json +76 -0
- configs/Llama2-70b-Drop16Block/config.json +76 -0
- configs/Llama2-70b-Drop16MLP/config.json +76 -0
- configs/Llama2-70b-Drop32Attn/config.json +76 -0
- configs/Llama2-70b-Drop32Block/config.json +76 -0
- configs/Llama2-70b-Drop32MLP/config.json +76 -0
- configs/Llama2-70b-Drop40Attn/config.json +76 -0
- configs/Llama2-70b-Drop40Block/config.json +76 -0
- configs/Llama2-70b-Drop40MLP/config.json +76 -0
- configs/Llama2-70b-Drop48Attn/config.json +76 -0
- configs/Llama2-70b-Drop48Block/config.json +76 -0
- configs/Llama2-70b-Drop48MLP/config.json +76 -0
- configs/Llama2-70b-Drop4Attn/config.json +76 -0
- configs/Llama2-70b-Drop4Block/config.json +76 -0
- configs/Llama2-70b-Drop4MLP/config.json +76 -0
- configs/Llama2-70b-Drop8Attn/config.json +76 -0
- configs/Llama2-70b-Drop8Block/config.json +76 -0
- configs/Llama2-70b-Drop8MLP/config.json +76 -0
- configuration_dropped_llama.py +232 -0
- generation_config.json +10 -0
- model.safetensors.index.json +810 -0
- modeling_dropped_llama.py +1338 -0
- special_tokens_map.json +23 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +35 -0
.gitattributes copy
ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
LICENSE.txt
ADDED
@@ -0,0 +1,126 @@
LLAMA 2 COMMUNITY LICENSE AGREEMENT
Llama 2 Version Release Date: July 18, 2023

"Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein.

"Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.

"Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.

"Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.

"Llama Materials" means, collectively, Meta's proprietary Llama 2 and Documentation (and any portion thereof) made available under this Agreement.

"Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).

By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement.

1. License Rights and Redistribution.

a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials.

b. Redistribution and Use.

i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party.

ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you.

iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved."

iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement.

v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof).

2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.

3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.

4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.

5. Intellectual Property.

a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials.

b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.

c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials.

6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.

7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.
README.md
ADDED
@@ -0,0 +1,323 @@
---
extra_gated_heading: You need to share contact information with Meta to access this model
extra_gated_prompt: >-
  ### LLAMA 2 COMMUNITY LICENSE AGREEMENT

  "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein.

  "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at https://ai.meta.com/resources/models-and-libraries/llama-downloads/.

  "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.

  "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.

  "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement.

  "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).

  By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement.

  1. License Rights and Redistribution.

  a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials.

  b. Redistribution and Use.

  i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party.

  ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you.

  iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved."

  iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement.

  v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof).

  2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.

  3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.

  4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.

  5. Intellectual Property.

  a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials.

  b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.

  c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials.

  6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.

  7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.

  ### Llama 2 Acceptable Use Policy

  Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy ("Policy"). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).

  #### Prohibited Uses

  We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:

  1. Violate the law or others' rights, including to:
      1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
          1. Violence or terrorism
          2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
          3. Human trafficking, exploitation, and sexual violence
          4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
          5. Sexual solicitation
          6. Any other criminal activity
      2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
      3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
      4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
      5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
      6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
      7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
  2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
      1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
      2. Guns and illegal weapons (including weapon development)
      3. Illegal drugs and regulated/controlled substances
      4. Operation of critical infrastructure, transportation technologies, or heavy machinery
      5. Self-harm or harm to others, including suicide, cutting, and eating disorders
      6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
  3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
      1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
      2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
      3. Generating, promoting, or further distributing spam
      4. Impersonating another individual without consent, authorization, or legal right
      5. Representing that the use of Llama 2 or outputs are human-generated
      6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
  4. Fail to appropriately disclose to end users any known dangers of your AI system

  Please report any violation of this Policy, software "bug," or other problems that could lead to a violation of this Policy through one of the following means:

  * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
  * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
  * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
  * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected])
extra_gated_fields:
  First Name: text
  Last Name: text
  Date of birth: date_picker
  Country: country
  Affiliation: text
  geo: ip_location
  By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox
extra_gated_description: >-
  The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).
extra_gated_button_content: Submit
language:
- en
pipeline_tag: text-generation
tags:
- facebook
- meta
- pytorch
- llama
- llama-2
license: llama2
---

# **Llama 2**

Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 70B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.

## Model Details
*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.*

Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.

**Model Developers** Meta

**Variations** Llama 2 comes in a range of parameter sizes (7B, 13B, and 70B) as well as pretrained and fine-tuned variations.

**Input** Models input text only.

**Output** Models generate text only.

**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.

||Training Data|Params|Content Length|GQA|Tokens|LR|
|---|---|---|---|---|---|---|
|Llama 2|*A new mix of publicly available online data*|7B|4k|✗|2.0T|3.0 x 10<sup>-4</sup>|
|Llama 2|*A new mix of publicly available online data*|13B|4k|✗|2.0T|3.0 x 10<sup>-4</sup>|
|Llama 2|*A new mix of publicly available online data*|70B|4k|✔|2.0T|1.5 x 10<sup>-4</sup>|

*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. The bigger model (70B) uses Grouped-Query Attention (GQA) for improved inference scalability.

**Model Dates** Llama 2 was trained between January 2023 and July 2023.

**Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.

**License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)

**Research Paper** ["Llama-2: Open Foundation and Fine-tuned Chat Models"](https://arxiv.org/abs/2307.09288)

## Intended Use
**Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.

To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212).
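As an illustration of that layout, the sketch below assembles a single-turn prompt from the tags named above; it is a simplified reading of the format rather than the reference `chat_completion` implementation, and the system prompt and user message are placeholder strings.

```python
# Illustrative sketch of the single-turn Llama 2 chat prompt layout (not the
# reference implementation). The tokenizer is expected to add BOS/EOS itself.
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"

def build_prompt(system_prompt: str, user_message: str) -> str:
    # strip() on the user input avoids the double spaces mentioned above
    return f"{B_INST} {B_SYS}{system_prompt}{E_SYS}{user_message.strip()} {E_INST}"

print(build_prompt("You are a helpful assistant.", "Explain grouped-query attention."))
```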
**Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.

## Hardware and Software
**Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.

**Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.

||Time (GPU hours)|Power Consumption (W)|Carbon Emitted (tCO<sub>2</sub>eq)|
|---|---|---|---|
|Llama 2 7B|184320|400|31.22|
|Llama 2 13B|368640|400|62.44|
|Llama 2 70B|1720320|400|291.42|
|Total|3311616||539.00|

**CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.

## Training Data
**Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.

**Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.

## Evaluation Results

In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks. For all the evaluations, we use our internal evaluations library.

|Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval|
|---|---|---|---|---|---|---|---|---|---|
|Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9|
|Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9|
|Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7|
|Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6|
|Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3|
|Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1|
|Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**|

**Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.

|||TruthfulQA|Toxigen|
|---|---|---|---|
|Llama 1|7B|27.42|23.00|
|Llama 1|13B|41.74|23.08|
|Llama 1|33B|44.19|22.57|
|Llama 1|65B|48.71|21.77|
|Llama 2|7B|33.29|**21.25**|
|Llama 2|13B|41.86|26.10|
|Llama 2|70B|**50.18**|24.60|

**Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).

|||TruthfulQA|Toxigen|
|---|---|---|---|
|Llama-2-Chat|7B|57.04|**0.00**|
|Llama-2-Chat|13B|62.18|**0.00**|
|Llama-2-Chat|70B|**64.14**|0.01|

**Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above.

## Ethical Considerations and Limitations
Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.

Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide)

## Reporting Issues
Please report any software “bug,” or other problems with the models through one of the following means:
- Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
- Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
- Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)

## Llama Model Index
|Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf|
|---|---|---|---|---|
|7B| [Link](https://huggingface.co/meta-llama/Llama-2-7b) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)|
|13B| [Link](https://huggingface.co/meta-llama/Llama-2-13b) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf)|
|70B| [Link](https://huggingface.co/meta-llama/Llama-2-70b) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)|
USE_POLICY.md
ADDED
@@ -0,0 +1,50 @@
# Llama 2 Acceptable Use Policy

Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy).

## Prohibited Uses
We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:

1. Violate the law or others’ rights, including to:
    1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
        1. Violence or terrorism
        2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
        3. Human trafficking, exploitation, and sexual violence
        4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
        5. Sexual solicitation
        6. Any other criminal activity
    2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
    3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
    4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
    5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
    6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
    7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system

2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
    1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
    2. Guns and illegal weapons (including weapon development)
    3. Illegal drugs and regulated/controlled substances
    4. Operation of critical infrastructure, transportation technologies, or heavy machinery
    5. Self-harm or harm to others, including suicide, cutting, and eating disorders
    6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual

3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
    1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
    2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
    3. Generating, promoting, or further distributing spam
    4. Impersonating another individual without consent, authorization, or legal right
    5. Representing that the use of Llama 2 or outputs are human-generated
    6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement

4. Fail to appropriately disclose to end users any known dangers of your AI system

Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:

* Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
* Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
* Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)
* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected])
config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [
    22, 30, 32, 36, 43, 45, 46, 47, 48, 49,
    50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
    60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
    70, 71, 72, 73, 74, 75, 76, 77, 78, 79
  ],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
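The `auto_map` block above routes `AutoConfig` and `AutoModelForCausalLM` to the bundled `configuration_dropped_llama.py` and `modeling_dropped_llama.py`, so loading this checkpoint goes through `trust_remote_code`. A minimal loading sketch, assuming a placeholder repository id and enough GPU memory for the float16 weights:

```python
# Minimal sketch for loading a checkpoint that ships its own modeling code.
# "your-org/llama2-70b-dropped" is a placeholder id, not an actual repository.
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

repo_id = "your-org/llama2-70b-dropped"  # placeholder

config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
print(config.drop_attn_list)  # per the field name, layer indices whose attention sub-block is skipped
print(config.drop_mlp_list)   # null in this config: every MLP sub-block is kept

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.float16,
    device_map="auto",        # shard the 70B weights across available GPUs
    trust_remote_code=True,   # resolves modeling_dropped_llama.LlamaForCausalLM
)
```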
configs/Llama2-70b-Drop16Attn/config.json
ADDED
@@ -0,0 +1,76 @@
configs/Llama2-70b-Drop16Block/config.json
ADDED
@@ -0,0 +1,76 @@
configs/Llama2-70b-Drop16MLP/config.json
ADDED
@@ -0,0 +1,76 @@
configs/Llama2-70b-Drop32Attn/config.json
ADDED
@@ -0,0 +1,76 @@
configs/Llama2-70b-Drop32Block/config.json
ADDED
@@ -0,0 +1,76 @@
configs/Llama2-70b-Drop32MLP/config.json
ADDED
@@ -0,0 +1,76 @@
configs/Llama2-70b-Drop40Attn/config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [22, 30, 32, 36, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
configs/Llama2-70b-Drop40Block/config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [22, 30, 32, 36, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
configs/Llama2-70b-Drop40MLP/config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [22, 30, 32, 36, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
configs/Llama2-70b-Drop48Attn/config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [22, 30, 32, 36, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
configs/Llama2-70b-Drop48Block/config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [22, 30, 32, 36, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
configs/Llama2-70b-Drop48MLP/config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [22, 30, 32, 36, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
configs/Llama2-70b-Drop4Attn/config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [22, 30, 32, 36, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
configs/Llama2-70b-Drop4Block/config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [22, 30, 32, 36, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
configs/Llama2-70b-Drop4MLP/config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [22, 30, 32, 36, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
configs/Llama2-70b-Drop8Attn/config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [22, 30, 32, 36, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
configs/Llama2-70b-Drop8Block/config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [22, 30, 32, 36, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
configs/Llama2-70b-Drop8MLP/config.json
ADDED
@@ -0,0 +1,76 @@
{
  "_name_or_path": "meta-llama/Llama-2-70b-hf",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_dropped_llama.LlamaConfig",
    "AutoModelForCausalLM": "modeling_dropped_llama.LlamaForCausalLM"
  },
  "bos_token_id": 1,
  "drop_attn_list": [22, 30, 32, 36, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79],
  "drop_mlp_list": null,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 8192,
  "initializer_range": 0.02,
  "intermediate_size": 28672,
  "max_position_embeddings": 4096,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 64,
  "num_hidden_layers": 80,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.41.2",
  "use_cache": true,
  "vocab_size": 32000
}
configuration_dropped_llama.py
ADDED
@@ -0,0 +1,232 @@
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" transformers==4.38.1"""
""" LLaMA model configuration"""
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the LLaMA-7B.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`LlamaModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
            Llama 2 up to 4096, CodeLlama up to 16384.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to understand more about it. This value is
            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
            these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
            experimental feature, subject to breaking API changes in future versions.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import LlamaModel, LlamaConfig

    >>> # Initializing a LLaMA llama-7b style configuration
    >>> configuration = LlamaConfig()

    >>> # Initializing a model from the llama-7b style configuration
    >>> model = LlamaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        drop_mlp_list=None,
        drop_attn_list=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        #####################################################################################################################

        # ✨ convert bool entries into int layer indices
        new_drop_attn_list = []
        if drop_attn_list is not None:
            for idx in range(len(drop_attn_list)):
                if isinstance(drop_attn_list[idx], bool):
                    if drop_attn_list[idx] == True:
                        new_drop_attn_list.append(idx)
                elif isinstance(drop_attn_list[idx], int):
                    new_drop_attn_list.append(drop_attn_list[idx])

        new_drop_mlp_list = []
        if drop_mlp_list is not None:
            for idx in range(len(drop_mlp_list)):
                if isinstance(drop_mlp_list[idx], bool):
                    if drop_mlp_list[idx] == True:
                        new_drop_mlp_list.append(idx)
                elif isinstance(drop_mlp_list[idx], int):
                    new_drop_mlp_list.append(drop_mlp_list[idx])

        #####################################################################################################################

        if new_drop_mlp_list:
            self.drop_mlp_list = []
            for idx in range(self.num_hidden_layers):
                self.drop_mlp_list.append(True if idx in new_drop_mlp_list else False)
        else:
            self.drop_mlp_list = [False] * self.num_hidden_layers

        if new_drop_attn_list:
            self.drop_attn_list = []
            for idx in range(self.num_hidden_layers):
                self.drop_attn_list.append(True if idx in new_drop_attn_list else False)
        else:
            self.drop_attn_list = [False] * self.num_hidden_layers

        #####################################################################################################################

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
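
Note: the `auto_map` entries in the configs point at this file and at modeling_dropped_llama.py, so the pruned model is intended to be loaded with `trust_remote_code=True`. A minimal loading sketch, not part of the upload; the repo id below is a placeholder for the actual Hub id or a local checkout, and `device_map="auto"` assumes `accelerate` is installed:

```python
# Loading sketch (assumption, not shipped in this repo).
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

repo = "<this-repo-id-or-local-path>"  # placeholder

# trust_remote_code=True lets transformers resolve the auto_map entries to
# configuration_dropped_llama.LlamaConfig and modeling_dropped_llama.LlamaForCausalLM.
config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    config=config,
    torch_dtype=torch.float16,
    device_map="auto",          # requires accelerate
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(repo)

inputs = tokenizer("Dropping attention layers trades accuracy for speed because", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```
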
generation_config.json
ADDED
@@ -0,0 +1,10 @@
{
  "bos_token_id": 1,
  "do_sample": true,
  "eos_token_id": 2,
  "pad_token_id": 0,
  "max_length": 4096,
  "temperature": 0.6,
  "top_p": 0.9,
  "transformers_version": "4.32.0.dev0"
}
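
Note: these defaults (sampling with temperature 0.6 and top-p 0.9, up to `max_length` 4096) are applied automatically by `generate()` when no explicit sampling arguments are passed. A short sketch, again with a placeholder repo id:

```python
# Sketch (placeholder repo id): inspect and reuse the generation defaults above.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("<this-repo-id-or-local-path>")
print(gen_cfg.do_sample, gen_cfg.temperature, gen_cfg.top_p, gen_cfg.max_length)
# True 0.6 0.9 4096

# Equivalent explicit call on a loaded model:
# model.generate(**inputs, do_sample=True, temperature=0.6, top_p=0.9, max_length=4096)
```
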
model.safetensors.index.json
ADDED
@@ -0,0 +1,810 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"metadata": {
|
3 |
+
"total_size": 137953316864
|
4 |
+
},
|
5 |
+
"weight_map": {
|
6 |
+
"lm_head.weight": "model-00015-of-00015.safetensors",
|
7 |
+
"model.embed_tokens.weight": "model-00001-of-00015.safetensors",
|
8 |
+
"model.layers.0.input_layernorm.weight": "model-00001-of-00015.safetensors",
|
9 |
+
"model.layers.0.mlp.down_proj.weight": "model-00001-of-00015.safetensors",
|
10 |
+
"model.layers.0.mlp.gate_proj.weight": "model-00001-of-00015.safetensors",
|
11 |
+
"model.layers.0.mlp.up_proj.weight": "model-00001-of-00015.safetensors",
|
12 |
+
"model.layers.0.post_attention_layernorm.weight": "model-00001-of-00015.safetensors",
|
13 |
+
"model.layers.0.self_attn.k_proj.weight": "model-00001-of-00015.safetensors",
|
14 |
+
"model.layers.0.self_attn.o_proj.weight": "model-00001-of-00015.safetensors",
|
15 |
+
"model.layers.0.self_attn.q_proj.weight": "model-00001-of-00015.safetensors",
|
16 |
+
"model.layers.0.self_attn.rotary_emb.inv_freq": "model-00001-of-00015.safetensors",
|
17 |
+
"model.layers.0.self_attn.v_proj.weight": "model-00001-of-00015.safetensors",
|
18 |
+
"model.layers.1.input_layernorm.weight": "model-00001-of-00015.safetensors",
|
19 |
+
"model.layers.1.mlp.down_proj.weight": "model-00001-of-00015.safetensors",
|
20 |
+
"model.layers.1.mlp.gate_proj.weight": "model-00001-of-00015.safetensors",
|
21 |
+
"model.layers.1.mlp.up_proj.weight": "model-00001-of-00015.safetensors",
|
22 |
+
"model.layers.1.post_attention_layernorm.weight": "model-00001-of-00015.safetensors",
|
23 |
+
"model.layers.1.self_attn.k_proj.weight": "model-00001-of-00015.safetensors",
|
24 |
+
"model.layers.1.self_attn.o_proj.weight": "model-00001-of-00015.safetensors",
|
25 |
+
"model.layers.1.self_attn.q_proj.weight": "model-00001-of-00015.safetensors",
|
26 |
+
"model.layers.1.self_attn.rotary_emb.inv_freq": "model-00001-of-00015.safetensors",
|
27 |
+
"model.layers.1.self_attn.v_proj.weight": "model-00001-of-00015.safetensors",
|
28 |
+
"model.layers.10.input_layernorm.weight": "model-00002-of-00015.safetensors",
|
29 |
+
"model.layers.10.mlp.down_proj.weight": "model-00002-of-00015.safetensors",
|
30 |
+
"model.layers.10.mlp.gate_proj.weight": "model-00002-of-00015.safetensors",
|
31 |
+
"model.layers.10.mlp.up_proj.weight": "model-00002-of-00015.safetensors",
|
32 |
+
"model.layers.10.post_attention_layernorm.weight": "model-00002-of-00015.safetensors",
|
33 |
+
"model.layers.10.self_attn.k_proj.weight": "model-00002-of-00015.safetensors",
|
34 |
+
"model.layers.10.self_attn.o_proj.weight": "model-00002-of-00015.safetensors",
|
35 |
+
"model.layers.10.self_attn.q_proj.weight": "model-00002-of-00015.safetensors",
|
36 |
+
"model.layers.10.self_attn.rotary_emb.inv_freq": "model-00002-of-00015.safetensors",
|
37 |
+
"model.layers.10.self_attn.v_proj.weight": "model-00002-of-00015.safetensors",
|
38 |
+
"model.layers.11.input_layernorm.weight": "model-00003-of-00015.safetensors",
|
39 |
+
"model.layers.11.mlp.down_proj.weight": "model-00003-of-00015.safetensors",
|
40 |
+
"model.layers.11.mlp.gate_proj.weight": "model-00003-of-00015.safetensors",
|
41 |
+
"model.layers.11.mlp.up_proj.weight": "model-00003-of-00015.safetensors",
|
42 |
+
"model.layers.11.post_attention_layernorm.weight": "model-00003-of-00015.safetensors",
|
43 |
+
"model.layers.11.self_attn.k_proj.weight": "model-00002-of-00015.safetensors",
|
44 |
+
"model.layers.11.self_attn.o_proj.weight": "model-00002-of-00015.safetensors",
|
45 |
+
"model.layers.11.self_attn.q_proj.weight": "model-00002-of-00015.safetensors",
|
46 |
+
"model.layers.11.self_attn.rotary_emb.inv_freq": "model-00002-of-00015.safetensors",
|
47 |
+
"model.layers.11.self_attn.v_proj.weight": "model-00002-of-00015.safetensors",
|
48 |
+
"model.layers.12.input_layernorm.weight": "model-00003-of-00015.safetensors",
|
49 |
+
"model.layers.12.mlp.down_proj.weight": "model-00003-of-00015.safetensors",
|
50 |
+
"model.layers.12.mlp.gate_proj.weight": "model-00003-of-00015.safetensors",
|
51 |
+
"model.layers.12.mlp.up_proj.weight": "model-00003-of-00015.safetensors",
|
52 |
+
"model.layers.12.post_attention_layernorm.weight": "model-00003-of-00015.safetensors",
|
53 |
+
"model.layers.12.self_attn.k_proj.weight": "model-00003-of-00015.safetensors",
|
54 |
+
"model.layers.12.self_attn.o_proj.weight": "model-00003-of-00015.safetensors",
|
55 |
+
"model.layers.12.self_attn.q_proj.weight": "model-00003-of-00015.safetensors",
|
56 |
+
"model.layers.12.self_attn.rotary_emb.inv_freq": "model-00003-of-00015.safetensors",
|
57 |
+
"model.layers.12.self_attn.v_proj.weight": "model-00003-of-00015.safetensors",
|
58 |
+
"model.layers.13.input_layernorm.weight": "model-00003-of-00015.safetensors",
|
59 |
+
"model.layers.13.mlp.down_proj.weight": "model-00003-of-00015.safetensors",
|
60 |
+
"model.layers.13.mlp.gate_proj.weight": "model-00003-of-00015.safetensors",
|
61 |
+
"model.layers.13.mlp.up_proj.weight": "model-00003-of-00015.safetensors",
|
62 |
+
"model.layers.13.post_attention_layernorm.weight": "model-00003-of-00015.safetensors",
|
63 |
+
"model.layers.13.self_attn.k_proj.weight": "model-00003-of-00015.safetensors",
|
64 |
+
"model.layers.13.self_attn.o_proj.weight": "model-00003-of-00015.safetensors",
|
65 |
+
"model.layers.13.self_attn.q_proj.weight": "model-00003-of-00015.safetensors",
|
66 |
+
"model.layers.13.self_attn.rotary_emb.inv_freq": "model-00003-of-00015.safetensors",
|
67 |
+
"model.layers.13.self_attn.v_proj.weight": "model-00003-of-00015.safetensors",
|
68 |
+
"model.layers.14.input_layernorm.weight": "model-00003-of-00015.safetensors",
|
69 |
+
"model.layers.14.mlp.down_proj.weight": "model-00003-of-00015.safetensors",
|
70 |
+
"model.layers.14.mlp.gate_proj.weight": "model-00003-of-00015.safetensors",
|
71 |
+
"model.layers.14.mlp.up_proj.weight": "model-00003-of-00015.safetensors",
|
72 |
+
"model.layers.14.post_attention_layernorm.weight": "model-00003-of-00015.safetensors",
|
73 |
+
"model.layers.14.self_attn.k_proj.weight": "model-00003-of-00015.safetensors",
|
74 |
+
"model.layers.14.self_attn.o_proj.weight": "model-00003-of-00015.safetensors",
|
75 |
+
"model.layers.14.self_attn.q_proj.weight": "model-00003-of-00015.safetensors",
|
76 |
+
"model.layers.14.self_attn.rotary_emb.inv_freq": "model-00003-of-00015.safetensors",
|
77 |
+
"model.layers.14.self_attn.v_proj.weight": "model-00003-of-00015.safetensors",
|
78 |
+
"model.layers.15.input_layernorm.weight": "model-00003-of-00015.safetensors",
|
79 |
+
"model.layers.15.mlp.down_proj.weight": "model-00003-of-00015.safetensors",
|
80 |
+
"model.layers.15.mlp.gate_proj.weight": "model-00003-of-00015.safetensors",
|
81 |
+
"model.layers.15.mlp.up_proj.weight": "model-00003-of-00015.safetensors",
|
82 |
+
"model.layers.15.post_attention_layernorm.weight": "model-00003-of-00015.safetensors",
|
83 |
+
"model.layers.15.self_attn.k_proj.weight": "model-00003-of-00015.safetensors",
|
84 |
+
"model.layers.15.self_attn.o_proj.weight": "model-00003-of-00015.safetensors",
|
85 |
+
"model.layers.15.self_attn.q_proj.weight": "model-00003-of-00015.safetensors",
|
86 |
+
"model.layers.15.self_attn.rotary_emb.inv_freq": "model-00003-of-00015.safetensors",
|
87 |
+
"model.layers.15.self_attn.v_proj.weight": "model-00003-of-00015.safetensors",
|
88 |
+
"model.layers.16.input_layernorm.weight": "model-00003-of-00015.safetensors",
|
89 |
+
"model.layers.16.mlp.down_proj.weight": "model-00003-of-00015.safetensors",
|
90 |
+
"model.layers.16.mlp.gate_proj.weight": "model-00003-of-00015.safetensors",
|
91 |
+
"model.layers.16.mlp.up_proj.weight": "model-00003-of-00015.safetensors",
|
92 |
+
"model.layers.16.post_attention_layernorm.weight": "model-00003-of-00015.safetensors",
|
93 |
+
"model.layers.16.self_attn.k_proj.weight": "model-00003-of-00015.safetensors",
|
94 |
+
"model.layers.16.self_attn.o_proj.weight": "model-00003-of-00015.safetensors",
|
95 |
+
"model.layers.16.self_attn.q_proj.weight": "model-00003-of-00015.safetensors",
|
96 |
+
"model.layers.16.self_attn.rotary_emb.inv_freq": "model-00003-of-00015.safetensors",
|
97 |
+
"model.layers.16.self_attn.v_proj.weight": "model-00003-of-00015.safetensors",
|
98 |
+
"model.layers.17.input_layernorm.weight": "model-00004-of-00015.safetensors",
|
99 |
+
"model.layers.17.mlp.down_proj.weight": "model-00004-of-00015.safetensors",
|
100 |
+
"model.layers.17.mlp.gate_proj.weight": "model-00004-of-00015.safetensors",
|
101 |
+
"model.layers.17.mlp.up_proj.weight": "model-00004-of-00015.safetensors",
|
102 |
+
"model.layers.17.post_attention_layernorm.weight": "model-00004-of-00015.safetensors",
|
103 |
+
"model.layers.17.self_attn.k_proj.weight": "model-00004-of-00015.safetensors",
|
104 |
+
"model.layers.17.self_attn.o_proj.weight": "model-00004-of-00015.safetensors",
|
105 |
+
"model.layers.17.self_attn.q_proj.weight": "model-00004-of-00015.safetensors",
|
106 |
+
"model.layers.17.self_attn.rotary_emb.inv_freq": "model-00004-of-00015.safetensors",
|
107 |
+
"model.layers.17.self_attn.v_proj.weight": "model-00004-of-00015.safetensors",
|
108 |
+
"model.layers.18.input_layernorm.weight": "model-00004-of-00015.safetensors",
|
109 |
+
"model.layers.18.mlp.down_proj.weight": "model-00004-of-00015.safetensors",
|
110 |
+
"model.layers.18.mlp.gate_proj.weight": "model-00004-of-00015.safetensors",
|
111 |
+
"model.layers.18.mlp.up_proj.weight": "model-00004-of-00015.safetensors",
|
112 |
+
"model.layers.18.post_attention_layernorm.weight": "model-00004-of-00015.safetensors",
|
113 |
+
"model.layers.18.self_attn.k_proj.weight": "model-00004-of-00015.safetensors",
|
114 |
+
"model.layers.18.self_attn.o_proj.weight": "model-00004-of-00015.safetensors",
|
115 |
+
"model.layers.18.self_attn.q_proj.weight": "model-00004-of-00015.safetensors",
|
116 |
+
"model.layers.18.self_attn.rotary_emb.inv_freq": "model-00004-of-00015.safetensors",
|
117 |
+
"model.layers.18.self_attn.v_proj.weight": "model-00004-of-00015.safetensors",
|
118 |
+
"model.layers.19.input_layernorm.weight": "model-00004-of-00015.safetensors",
|
119 |
+
"model.layers.19.mlp.down_proj.weight": "model-00004-of-00015.safetensors",
|
120 |
+
"model.layers.19.mlp.gate_proj.weight": "model-00004-of-00015.safetensors",
|
121 |
+
"model.layers.19.mlp.up_proj.weight": "model-00004-of-00015.safetensors",
|
122 |
+
"model.layers.19.post_attention_layernorm.weight": "model-00004-of-00015.safetensors",
|
123 |
+
"model.layers.19.self_attn.k_proj.weight": "model-00004-of-00015.safetensors",
|
124 |
+
"model.layers.19.self_attn.o_proj.weight": "model-00004-of-00015.safetensors",
|
125 |
+
"model.layers.19.self_attn.q_proj.weight": "model-00004-of-00015.safetensors",
|
126 |
+
"model.layers.19.self_attn.rotary_emb.inv_freq": "model-00004-of-00015.safetensors",
|
127 |
+
"model.layers.19.self_attn.v_proj.weight": "model-00004-of-00015.safetensors",
|
128 |
+
"model.layers.2.input_layernorm.weight": "model-00001-of-00015.safetensors",
|
129 |
+
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00015.safetensors",
|
130 |
+
"model.layers.2.mlp.gate_proj.weight": "model-00001-of-00015.safetensors",
|
131 |
+
"model.layers.2.mlp.up_proj.weight": "model-00001-of-00015.safetensors",
|
132 |
+
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00015.safetensors",
|
133 |
+
"model.layers.2.self_attn.k_proj.weight": "model-00001-of-00015.safetensors",
|
134 |
+
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00015.safetensors",
|
135 |
+
"model.layers.2.self_attn.q_proj.weight": "model-00001-of-00015.safetensors",
|
136 |
+
"model.layers.2.self_attn.rotary_emb.inv_freq": "model-00001-of-00015.safetensors",
|
137 |
+
"model.layers.2.self_attn.v_proj.weight": "model-00001-of-00015.safetensors",
|
138 |
+
"model.layers.20.input_layernorm.weight": "model-00004-of-00015.safetensors",
|
139 |
+
"model.layers.20.mlp.down_proj.weight": "model-00004-of-00015.safetensors",
|
140 |
+
"model.layers.20.mlp.gate_proj.weight": "model-00004-of-00015.safetensors",
|
141 |
+
"model.layers.20.mlp.up_proj.weight": "model-00004-of-00015.safetensors",
|
142 |
+
"model.layers.20.post_attention_layernorm.weight": "model-00004-of-00015.safetensors",
|
143 |
+
"model.layers.20.self_attn.k_proj.weight": "model-00004-of-00015.safetensors",
|
144 |
+
"model.layers.20.self_attn.o_proj.weight": "model-00004-of-00015.safetensors",
|
145 |
+
"model.layers.20.self_attn.q_proj.weight": "model-00004-of-00015.safetensors",
|
146 |
+
"model.layers.20.self_attn.rotary_emb.inv_freq": "model-00004-of-00015.safetensors",
|
147 |
+
"model.layers.20.self_attn.v_proj.weight": "model-00004-of-00015.safetensors",
|
148 |
+
"model.layers.21.input_layernorm.weight": "model-00004-of-00015.safetensors",
|
149 |
+
"model.layers.21.mlp.down_proj.weight": "model-00004-of-00015.safetensors",
|
150 |
+
"model.layers.21.mlp.gate_proj.weight": "model-00004-of-00015.safetensors",
|
151 |
+
"model.layers.21.mlp.up_proj.weight": "model-00004-of-00015.safetensors",
|
152 |
+
"model.layers.21.post_attention_layernorm.weight": "model-00004-of-00015.safetensors",
|
153 |
+
"model.layers.21.self_attn.k_proj.weight": "model-00004-of-00015.safetensors",
|
154 |
+
"model.layers.21.self_attn.o_proj.weight": "model-00004-of-00015.safetensors",
|
155 |
+
"model.layers.21.self_attn.q_proj.weight": "model-00004-of-00015.safetensors",
|
156 |
+
"model.layers.21.self_attn.rotary_emb.inv_freq": "model-00004-of-00015.safetensors",
|
157 |
+
"model.layers.21.self_attn.v_proj.weight": "model-00004-of-00015.safetensors",
|
158 |
+
"model.layers.22.input_layernorm.weight": "model-00005-of-00015.safetensors",
|
159 |
+
"model.layers.22.mlp.down_proj.weight": "model-00004-of-00015.safetensors",
|
160 |
+
"model.layers.22.mlp.gate_proj.weight": "model-00004-of-00015.safetensors",
|
161 |
+
"model.layers.22.mlp.up_proj.weight": "model-00005-of-00015.safetensors",
|
162 |
+
"model.layers.22.post_attention_layernorm.weight": "model-00005-of-00015.safetensors",
|
163 |
+
"model.layers.22.self_attn.k_proj.weight": "model-00004-of-00015.safetensors",
|
164 |
+
"model.layers.22.self_attn.o_proj.weight": "model-00004-of-00015.safetensors",
|
165 |
+
"model.layers.22.self_attn.q_proj.weight": "model-00004-of-00015.safetensors",
|
166 |
+
"model.layers.22.self_attn.rotary_emb.inv_freq": "model-00004-of-00015.safetensors",
|
167 |
+
"model.layers.22.self_attn.v_proj.weight": "model-00004-of-00015.safetensors",
|
168 |
+
"model.layers.23.input_layernorm.weight": "model-00005-of-00015.safetensors",
|
169 |
+
"model.layers.23.mlp.down_proj.weight": "model-00005-of-00015.safetensors",
|
170 |
+
"model.layers.23.mlp.gate_proj.weight": "model-00005-of-00015.safetensors",
|
171 |
+
"model.layers.23.mlp.up_proj.weight": "model-00005-of-00015.safetensors",
|
172 |
+
"model.layers.23.post_attention_layernorm.weight": "model-00005-of-00015.safetensors",
|
173 |
+
"model.layers.23.self_attn.k_proj.weight": "model-00005-of-00015.safetensors",
|
174 |
+
"model.layers.23.self_attn.o_proj.weight": "model-00005-of-00015.safetensors",
|
175 |
+
"model.layers.23.self_attn.q_proj.weight": "model-00005-of-00015.safetensors",
|
176 |
+
"model.layers.23.self_attn.rotary_emb.inv_freq": "model-00005-of-00015.safetensors",
|
177 |
+
"model.layers.23.self_attn.v_proj.weight": "model-00005-of-00015.safetensors",
|
178 |
+
"model.layers.24.input_layernorm.weight": "model-00005-of-00015.safetensors",
|
179 |
+
"model.layers.24.mlp.down_proj.weight": "model-00005-of-00015.safetensors",
|
180 |
+
"model.layers.24.mlp.gate_proj.weight": "model-00005-of-00015.safetensors",
|
181 |
+
"model.layers.24.mlp.up_proj.weight": "model-00005-of-00015.safetensors",
|
182 |
+
"model.layers.24.post_attention_layernorm.weight": "model-00005-of-00015.safetensors",
|
183 |
+
"model.layers.24.self_attn.k_proj.weight": "model-00005-of-00015.safetensors",
|
184 |
+
"model.layers.24.self_attn.o_proj.weight": "model-00005-of-00015.safetensors",
|
185 |
+
"model.layers.24.self_attn.q_proj.weight": "model-00005-of-00015.safetensors",
|
186 |
+
"model.layers.24.self_attn.rotary_emb.inv_freq": "model-00005-of-00015.safetensors",
|
187 |
+
"model.layers.24.self_attn.v_proj.weight": "model-00005-of-00015.safetensors",
|
188 |
+
"model.layers.25.input_layernorm.weight": "model-00005-of-00015.safetensors",
|
189 |
+
"model.layers.25.mlp.down_proj.weight": "model-00005-of-00015.safetensors",
|
190 |
+
"model.layers.25.mlp.gate_proj.weight": "model-00005-of-00015.safetensors",
|
191 |
+
"model.layers.25.mlp.up_proj.weight": "model-00005-of-00015.safetensors",
|
192 |
+
"model.layers.25.post_attention_layernorm.weight": "model-00005-of-00015.safetensors",
|
193 |
+
"model.layers.25.self_attn.k_proj.weight": "model-00005-of-00015.safetensors",
|
194 |
+
"model.layers.25.self_attn.o_proj.weight": "model-00005-of-00015.safetensors",
|
195 |
+
"model.layers.25.self_attn.q_proj.weight": "model-00005-of-00015.safetensors",
|
196 |
+
"model.layers.25.self_attn.rotary_emb.inv_freq": "model-00005-of-00015.safetensors",
|
197 |
+
"model.layers.25.self_attn.v_proj.weight": "model-00005-of-00015.safetensors",
|
198 |
+
"model.layers.26.input_layernorm.weight": "model-00005-of-00015.safetensors",
|
199 |
+
"model.layers.26.mlp.down_proj.weight": "model-00005-of-00015.safetensors",
|
200 |
+
"model.layers.26.mlp.gate_proj.weight": "model-00005-of-00015.safetensors",
|
201 |
+
"model.layers.26.mlp.up_proj.weight": "model-00005-of-00015.safetensors",
|
202 |
+
"model.layers.26.post_attention_layernorm.weight": "model-00005-of-00015.safetensors",
|
203 |
+
"model.layers.26.self_attn.k_proj.weight": "model-00005-of-00015.safetensors",
|
204 |
+
"model.layers.26.self_attn.o_proj.weight": "model-00005-of-00015.safetensors",
|
205 |
+
"model.layers.26.self_attn.q_proj.weight": "model-00005-of-00015.safetensors",
|
206 |
+
"model.layers.26.self_attn.rotary_emb.inv_freq": "model-00005-of-00015.safetensors",
|
207 |
+
"model.layers.26.self_attn.v_proj.weight": "model-00005-of-00015.safetensors",
|
208 |
+
"model.layers.27.input_layernorm.weight": "model-00005-of-00015.safetensors",
|
209 |
+
"model.layers.27.mlp.down_proj.weight": "model-00005-of-00015.safetensors",
|
210 |
+
"model.layers.27.mlp.gate_proj.weight": "model-00005-of-00015.safetensors",
|
211 |
+
"model.layers.27.mlp.up_proj.weight": "model-00005-of-00015.safetensors",
|
212 |
+
"model.layers.27.post_attention_layernorm.weight": "model-00005-of-00015.safetensors",
|
213 |
+
"model.layers.27.self_attn.k_proj.weight": "model-00005-of-00015.safetensors",
|
214 |
+
"model.layers.27.self_attn.o_proj.weight": "model-00005-of-00015.safetensors",
|
215 |
+
"model.layers.27.self_attn.q_proj.weight": "model-00005-of-00015.safetensors",
|
216 |
+
"model.layers.27.self_attn.rotary_emb.inv_freq": "model-00005-of-00015.safetensors",
|
217 |
+
"model.layers.27.self_attn.v_proj.weight": "model-00005-of-00015.safetensors",
|
218 |
+
"model.layers.28.input_layernorm.weight": "model-00006-of-00015.safetensors",
|
219 |
+
"model.layers.28.mlp.down_proj.weight": "model-00006-of-00015.safetensors",
|
220 |
+
"model.layers.28.mlp.gate_proj.weight": "model-00005-of-00015.safetensors",
|
221 |
+
"model.layers.28.mlp.up_proj.weight": "model-00006-of-00015.safetensors",
|
222 |
+
"model.layers.28.post_attention_layernorm.weight": "model-00006-of-00015.safetensors",
|
223 |
+
"model.layers.28.self_attn.k_proj.weight": "model-00005-of-00015.safetensors",
|
224 |
+
"model.layers.28.self_attn.o_proj.weight": "model-00005-of-00015.safetensors",
|
225 |
+
"model.layers.28.self_attn.q_proj.weight": "model-00005-of-00015.safetensors",
|
226 |
+
"model.layers.28.self_attn.rotary_emb.inv_freq": "model-00005-of-00015.safetensors",
|
227 |
+
"model.layers.28.self_attn.v_proj.weight": "model-00005-of-00015.safetensors",
|
228 |
+
"model.layers.29.input_layernorm.weight": "model-00006-of-00015.safetensors",
|
229 |
+
"model.layers.29.mlp.down_proj.weight": "model-00006-of-00015.safetensors",
|
230 |
+
"model.layers.29.mlp.gate_proj.weight": "model-00006-of-00015.safetensors",
|
231 |
+
"model.layers.29.mlp.up_proj.weight": "model-00006-of-00015.safetensors",
|
232 |
+
"model.layers.29.post_attention_layernorm.weight": "model-00006-of-00015.safetensors",
|
233 |
+
"model.layers.29.self_attn.k_proj.weight": "model-00006-of-00015.safetensors",
|
234 |
+
"model.layers.29.self_attn.o_proj.weight": "model-00006-of-00015.safetensors",
|
235 |
+
"model.layers.29.self_attn.q_proj.weight": "model-00006-of-00015.safetensors",
|
236 |
+
"model.layers.29.self_attn.rotary_emb.inv_freq": "model-00006-of-00015.safetensors",
|
237 |
+
"model.layers.29.self_attn.v_proj.weight": "model-00006-of-00015.safetensors",
|
238 |
+
"model.layers.3.input_layernorm.weight": "model-00001-of-00015.safetensors",
|
239 |
+
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00015.safetensors",
|
240 |
+
"model.layers.3.mlp.gate_proj.weight": "model-00001-of-00015.safetensors",
|
241 |
+
"model.layers.3.mlp.up_proj.weight": "model-00001-of-00015.safetensors",
|
242 |
+
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00015.safetensors",
|
243 |
+
"model.layers.3.self_attn.k_proj.weight": "model-00001-of-00015.safetensors",
|
244 |
+
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00015.safetensors",
|
245 |
+
"model.layers.3.self_attn.q_proj.weight": "model-00001-of-00015.safetensors",
|
246 |
+
"model.layers.3.self_attn.rotary_emb.inv_freq": "model-00001-of-00015.safetensors",
|
247 |
+
"model.layers.3.self_attn.v_proj.weight": "model-00001-of-00015.safetensors",
|
248 |
+
"model.layers.30.input_layernorm.weight": "model-00006-of-00015.safetensors",
|
249 |
+
"model.layers.30.mlp.down_proj.weight": "model-00006-of-00015.safetensors",
|
250 |
+
"model.layers.30.mlp.gate_proj.weight": "model-00006-of-00015.safetensors",
|
251 |
+
"model.layers.30.mlp.up_proj.weight": "model-00006-of-00015.safetensors",
|
252 |
+
"model.layers.30.post_attention_layernorm.weight": "model-00006-of-00015.safetensors",
|
253 |
+
"model.layers.30.self_attn.k_proj.weight": "model-00006-of-00015.safetensors",
|
254 |
+
"model.layers.30.self_attn.o_proj.weight": "model-00006-of-00015.safetensors",
|
255 |
+
"model.layers.30.self_attn.q_proj.weight": "model-00006-of-00015.safetensors",
|
256 |
+
"model.layers.30.self_attn.rotary_emb.inv_freq": "model-00006-of-00015.safetensors",
|
257 |
+
"model.layers.30.self_attn.v_proj.weight": "model-00006-of-00015.safetensors",
|
258 |
+
"model.layers.31.input_layernorm.weight": "model-00006-of-00015.safetensors",
|
259 |
+
"model.layers.31.mlp.down_proj.weight": "model-00006-of-00015.safetensors",
|
260 |
+
"model.layers.31.mlp.gate_proj.weight": "model-00006-of-00015.safetensors",
|
261 |
+
"model.layers.31.mlp.up_proj.weight": "model-00006-of-00015.safetensors",
|
262 |
+
"model.layers.31.post_attention_layernorm.weight": "model-00006-of-00015.safetensors",
|
263 |
+
"model.layers.31.self_attn.k_proj.weight": "model-00006-of-00015.safetensors",
|
264 |
+
"model.layers.31.self_attn.o_proj.weight": "model-00006-of-00015.safetensors",
|
265 |
+
"model.layers.31.self_attn.q_proj.weight": "model-00006-of-00015.safetensors",
|
266 |
+
"model.layers.31.self_attn.rotary_emb.inv_freq": "model-00006-of-00015.safetensors",
|
267 |
+
"model.layers.31.self_attn.v_proj.weight": "model-00006-of-00015.safetensors",
|
268 |
+
"model.layers.32.input_layernorm.weight": "model-00006-of-00015.safetensors",
|
269 |
+
"model.layers.32.mlp.down_proj.weight": "model-00006-of-00015.safetensors",
|
270 |
+
"model.layers.32.mlp.gate_proj.weight": "model-00006-of-00015.safetensors",
|
271 |
+
"model.layers.32.mlp.up_proj.weight": "model-00006-of-00015.safetensors",
|
272 |
+
"model.layers.32.post_attention_layernorm.weight": "model-00006-of-00015.safetensors",
|
273 |
+
"model.layers.32.self_attn.k_proj.weight": "model-00006-of-00015.safetensors",
|
274 |
+
"model.layers.32.self_attn.o_proj.weight": "model-00006-of-00015.safetensors",
|
275 |
+
"model.layers.32.self_attn.q_proj.weight": "model-00006-of-00015.safetensors",
|
276 |
+
"model.layers.32.self_attn.rotary_emb.inv_freq": "model-00006-of-00015.safetensors",
|
277 |
+
"model.layers.32.self_attn.v_proj.weight": "model-00006-of-00015.safetensors",
|
278 |
+
"model.layers.33.input_layernorm.weight": "model-00006-of-00015.safetensors",
|
279 |
+
"model.layers.33.mlp.down_proj.weight": "model-00006-of-00015.safetensors",
|
280 |
+
"model.layers.33.mlp.gate_proj.weight": "model-00006-of-00015.safetensors",
|
281 |
+
"model.layers.33.mlp.up_proj.weight": "model-00006-of-00015.safetensors",
|
282 |
+
"model.layers.33.post_attention_layernorm.weight": "model-00006-of-00015.safetensors",
|
283 |
+
"model.layers.33.self_attn.k_proj.weight": "model-00006-of-00015.safetensors",
|
284 |
+
"model.layers.33.self_attn.o_proj.weight": "model-00006-of-00015.safetensors",
|
285 |
+
"model.layers.33.self_attn.q_proj.weight": "model-00006-of-00015.safetensors",
|
286 |
+
"model.layers.33.self_attn.rotary_emb.inv_freq": "model-00006-of-00015.safetensors",
|
287 |
+
"model.layers.33.self_attn.v_proj.weight": "model-00006-of-00015.safetensors",
|
288 |
+
"model.layers.34.input_layernorm.weight": "model-00007-of-00015.safetensors",
|
289 |
+
"model.layers.34.mlp.down_proj.weight": "model-00007-of-00015.safetensors",
|
290 |
+
"model.layers.34.mlp.gate_proj.weight": "model-00007-of-00015.safetensors",
|
291 |
+
"model.layers.34.mlp.up_proj.weight": "model-00007-of-00015.safetensors",
|
292 |
+
"model.layers.34.post_attention_layernorm.weight": "model-00007-of-00015.safetensors",
|
293 |
+
"model.layers.34.self_attn.k_proj.weight": "model-00006-of-00015.safetensors",
|
294 |
+
"model.layers.34.self_attn.o_proj.weight": "model-00006-of-00015.safetensors",
|
295 |
+
"model.layers.34.self_attn.q_proj.weight": "model-00006-of-00015.safetensors",
|
296 |
+
"model.layers.34.self_attn.rotary_emb.inv_freq": "model-00006-of-00015.safetensors",
|
297 |
+
"model.layers.34.self_attn.v_proj.weight": "model-00006-of-00015.safetensors",
|
298 |
+
"model.layers.35.input_layernorm.weight": "model-00007-of-00015.safetensors",
|
299 |
+
"model.layers.35.mlp.down_proj.weight": "model-00007-of-00015.safetensors",
|
300 |
+
"model.layers.35.mlp.gate_proj.weight": "model-00007-of-00015.safetensors",
|
301 |
+
"model.layers.35.mlp.up_proj.weight": "model-00007-of-00015.safetensors",
|
302 |
+
"model.layers.35.post_attention_layernorm.weight": "model-00007-of-00015.safetensors",
|
303 |
+
"model.layers.35.self_attn.k_proj.weight": "model-00007-of-00015.safetensors",
|
304 |
+
"model.layers.35.self_attn.o_proj.weight": "model-00007-of-00015.safetensors",
|
305 |
+
"model.layers.35.self_attn.q_proj.weight": "model-00007-of-00015.safetensors",
|
306 |
+
"model.layers.35.self_attn.rotary_emb.inv_freq": "model-00007-of-00015.safetensors",
|
307 |
+
"model.layers.35.self_attn.v_proj.weight": "model-00007-of-00015.safetensors",
|
308 |
+
"model.layers.36.input_layernorm.weight": "model-00007-of-00015.safetensors",
|
309 |
+
"model.layers.36.mlp.down_proj.weight": "model-00007-of-00015.safetensors",
|
310 |
+
"model.layers.36.mlp.gate_proj.weight": "model-00007-of-00015.safetensors",
|
311 |
+
"model.layers.36.mlp.up_proj.weight": "model-00007-of-00015.safetensors",
|
312 |
+
"model.layers.36.post_attention_layernorm.weight": "model-00007-of-00015.safetensors",
|
313 |
+
"model.layers.36.self_attn.k_proj.weight": "model-00007-of-00015.safetensors",
|
314 |
+
"model.layers.36.self_attn.o_proj.weight": "model-00007-of-00015.safetensors",
|
315 |
+
"model.layers.36.self_attn.q_proj.weight": "model-00007-of-00015.safetensors",
|
316 |
+
"model.layers.36.self_attn.rotary_emb.inv_freq": "model-00007-of-00015.safetensors",
|
317 |
+
"model.layers.36.self_attn.v_proj.weight": "model-00007-of-00015.safetensors",
|
318 |
+
"model.layers.37.input_layernorm.weight": "model-00007-of-00015.safetensors",
|
319 |
+
"model.layers.37.mlp.down_proj.weight": "model-00007-of-00015.safetensors",
|
320 |
+
"model.layers.37.mlp.gate_proj.weight": "model-00007-of-00015.safetensors",
|
321 |
+
"model.layers.37.mlp.up_proj.weight": "model-00007-of-00015.safetensors",
|
322 |
+
"model.layers.37.post_attention_layernorm.weight": "model-00007-of-00015.safetensors",
|
323 |
+
"model.layers.37.self_attn.k_proj.weight": "model-00007-of-00015.safetensors",
|
324 |
+
"model.layers.37.self_attn.o_proj.weight": "model-00007-of-00015.safetensors",
|
325 |
+
"model.layers.37.self_attn.q_proj.weight": "model-00007-of-00015.safetensors",
|
326 |
+
"model.layers.37.self_attn.rotary_emb.inv_freq": "model-00007-of-00015.safetensors",
|
327 |
+
"model.layers.37.self_attn.v_proj.weight": "model-00007-of-00015.safetensors",
|
328 |
+
"model.layers.38.input_layernorm.weight": "model-00007-of-00015.safetensors",
|
329 |
+
"model.layers.38.mlp.down_proj.weight": "model-00007-of-00015.safetensors",
|
330 |
+
"model.layers.38.mlp.gate_proj.weight": "model-00007-of-00015.safetensors",
|
331 |
+
"model.layers.38.mlp.up_proj.weight": "model-00007-of-00015.safetensors",
|
332 |
+
"model.layers.38.post_attention_layernorm.weight": "model-00007-of-00015.safetensors",
|
333 |
+
"model.layers.38.self_attn.k_proj.weight": "model-00007-of-00015.safetensors",
|
334 |
+
"model.layers.38.self_attn.o_proj.weight": "model-00007-of-00015.safetensors",
|
335 |
+
"model.layers.38.self_attn.q_proj.weight": "model-00007-of-00015.safetensors",
|
336 |
+
"model.layers.38.self_attn.rotary_emb.inv_freq": "model-00007-of-00015.safetensors",
|
337 |
+
"model.layers.38.self_attn.v_proj.weight": "model-00007-of-00015.safetensors",
|
338 |
+
"model.layers.39.input_layernorm.weight": "model-00007-of-00015.safetensors",
|
339 |
+
"model.layers.39.mlp.down_proj.weight": "model-00007-of-00015.safetensors",
|
340 |
+
"model.layers.39.mlp.gate_proj.weight": "model-00007-of-00015.safetensors",
|
341 |
+
"model.layers.39.mlp.up_proj.weight": "model-00007-of-00015.safetensors",
|
342 |
+
"model.layers.39.post_attention_layernorm.weight": "model-00007-of-00015.safetensors",
|
343 |
+
"model.layers.39.self_attn.k_proj.weight": "model-00007-of-00015.safetensors",
|
344 |
+
"model.layers.39.self_attn.o_proj.weight": "model-00007-of-00015.safetensors",
|
345 |
+
"model.layers.39.self_attn.q_proj.weight": "model-00007-of-00015.safetensors",
|
346 |
+
"model.layers.39.self_attn.rotary_emb.inv_freq": "model-00007-of-00015.safetensors",
|
347 |
+
"model.layers.39.self_attn.v_proj.weight": "model-00007-of-00015.safetensors",
|
348 |
+
"model.layers.4.input_layernorm.weight": "model-00001-of-00015.safetensors",
|
349 |
+
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00015.safetensors",
|
350 |
+
"model.layers.4.mlp.gate_proj.weight": "model-00001-of-00015.safetensors",
|
351 |
+
"model.layers.4.mlp.up_proj.weight": "model-00001-of-00015.safetensors",
|
352 |
+
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00015.safetensors",
|
353 |
+
"model.layers.4.self_attn.k_proj.weight": "model-00001-of-00015.safetensors",
|
354 |
+
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00015.safetensors",
|
355 |
+
"model.layers.4.self_attn.q_proj.weight": "model-00001-of-00015.safetensors",
|
356 |
+
"model.layers.4.self_attn.rotary_emb.inv_freq": "model-00001-of-00015.safetensors",
|
357 |
+
"model.layers.4.self_attn.v_proj.weight": "model-00001-of-00015.safetensors",
|
358 |
+
"model.layers.40.input_layernorm.weight": "model-00008-of-00015.safetensors",
|
359 |
+
"model.layers.40.mlp.down_proj.weight": "model-00008-of-00015.safetensors",
|
360 |
+
"model.layers.40.mlp.gate_proj.weight": "model-00008-of-00015.safetensors",
|
361 |
+
"model.layers.40.mlp.up_proj.weight": "model-00008-of-00015.safetensors",
|
362 |
+
"model.layers.40.post_attention_layernorm.weight": "model-00008-of-00015.safetensors",
|
363 |
+
"model.layers.40.self_attn.k_proj.weight": "model-00008-of-00015.safetensors",
|
364 |
+
"model.layers.40.self_attn.o_proj.weight": "model-00008-of-00015.safetensors",
|
365 |
+
"model.layers.40.self_attn.q_proj.weight": "model-00008-of-00015.safetensors",
|
366 |
+
"model.layers.40.self_attn.rotary_emb.inv_freq": "model-00008-of-00015.safetensors",
|
367 |
+
"model.layers.40.self_attn.v_proj.weight": "model-00008-of-00015.safetensors",
|
368 |
+
"model.layers.41.input_layernorm.weight": "model-00008-of-00015.safetensors",
|
369 |
+
"model.layers.41.mlp.down_proj.weight": "model-00008-of-00015.safetensors",
|
370 |
+
"model.layers.41.mlp.gate_proj.weight": "model-00008-of-00015.safetensors",
|
371 |
+
"model.layers.41.mlp.up_proj.weight": "model-00008-of-00015.safetensors",
|
372 |
+
"model.layers.41.post_attention_layernorm.weight": "model-00008-of-00015.safetensors",
|
373 |
+
"model.layers.41.self_attn.k_proj.weight": "model-00008-of-00015.safetensors",
|
374 |
+
"model.layers.41.self_attn.o_proj.weight": "model-00008-of-00015.safetensors",
|
375 |
+
"model.layers.41.self_attn.q_proj.weight": "model-00008-of-00015.safetensors",
|
376 |
+
"model.layers.41.self_attn.rotary_emb.inv_freq": "model-00008-of-00015.safetensors",
|
377 |
+
"model.layers.41.self_attn.v_proj.weight": "model-00008-of-00015.safetensors",
|
378 |
+
"model.layers.42.input_layernorm.weight": "model-00008-of-00015.safetensors",
|
379 |
+
"model.layers.42.mlp.down_proj.weight": "model-00008-of-00015.safetensors",
|
380 |
+
"model.layers.42.mlp.gate_proj.weight": "model-00008-of-00015.safetensors",
|
381 |
+
"model.layers.42.mlp.up_proj.weight": "model-00008-of-00015.safetensors",
|
382 |
+
"model.layers.42.post_attention_layernorm.weight": "model-00008-of-00015.safetensors",
|
383 |
+
"model.layers.42.self_attn.k_proj.weight": "model-00008-of-00015.safetensors",
|
384 |
+
"model.layers.42.self_attn.o_proj.weight": "model-00008-of-00015.safetensors",
|
385 |
+
"model.layers.42.self_attn.q_proj.weight": "model-00008-of-00015.safetensors",
|
386 |
+
"model.layers.42.self_attn.rotary_emb.inv_freq": "model-00008-of-00015.safetensors",
|
387 |
+
"model.layers.42.self_attn.v_proj.weight": "model-00008-of-00015.safetensors",
|
388 |
+
"model.layers.43.input_layernorm.weight": "model-00008-of-00015.safetensors",
|
389 |
+
"model.layers.43.mlp.down_proj.weight": "model-00008-of-00015.safetensors",
|
390 |
+
"model.layers.43.mlp.gate_proj.weight": "model-00008-of-00015.safetensors",
|
391 |
+
"model.layers.43.mlp.up_proj.weight": "model-00008-of-00015.safetensors",
|
392 |
+
"model.layers.43.post_attention_layernorm.weight": "model-00008-of-00015.safetensors",
|
393 |
+
"model.layers.43.self_attn.k_proj.weight": "model-00008-of-00015.safetensors",
|
394 |
+
"model.layers.43.self_attn.o_proj.weight": "model-00008-of-00015.safetensors",
|
395 |
+
"model.layers.43.self_attn.q_proj.weight": "model-00008-of-00015.safetensors",
|
396 |
+
"model.layers.43.self_attn.rotary_emb.inv_freq": "model-00008-of-00015.safetensors",
|
397 |
+
"model.layers.43.self_attn.v_proj.weight": "model-00008-of-00015.safetensors",
|
398 |
+
"model.layers.44.input_layernorm.weight": "model-00008-of-00015.safetensors",
|
399 |
+
"model.layers.44.mlp.down_proj.weight": "model-00008-of-00015.safetensors",
|
400 |
+
"model.layers.44.mlp.gate_proj.weight": "model-00008-of-00015.safetensors",
|
401 |
+
"model.layers.44.mlp.up_proj.weight": "model-00008-of-00015.safetensors",
|
402 |
+
"model.layers.44.post_attention_layernorm.weight": "model-00008-of-00015.safetensors",
|
403 |
+
"model.layers.44.self_attn.k_proj.weight": "model-00008-of-00015.safetensors",
|
404 |
+
"model.layers.44.self_attn.o_proj.weight": "model-00008-of-00015.safetensors",
|
405 |
+
"model.layers.44.self_attn.q_proj.weight": "model-00008-of-00015.safetensors",
|
406 |
+
"model.layers.44.self_attn.rotary_emb.inv_freq": "model-00008-of-00015.safetensors",
|
407 |
+
"model.layers.44.self_attn.v_proj.weight": "model-00008-of-00015.safetensors",
|
408 |
+
"model.layers.45.input_layernorm.weight": "model-00009-of-00015.safetensors",
|
409 |
+
"model.layers.45.mlp.down_proj.weight": "model-00008-of-00015.safetensors",
|
410 |
+
"model.layers.45.mlp.gate_proj.weight": "model-00008-of-00015.safetensors",
|
411 |
+
"model.layers.45.mlp.up_proj.weight": "model-00009-of-00015.safetensors",
|
412 |
+
"model.layers.45.post_attention_layernorm.weight": "model-00009-of-00015.safetensors",
|
413 |
+
"model.layers.45.self_attn.k_proj.weight": "model-00008-of-00015.safetensors",
|
414 |
+
"model.layers.45.self_attn.o_proj.weight": "model-00008-of-00015.safetensors",
|
415 |
+
"model.layers.45.self_attn.q_proj.weight": "model-00008-of-00015.safetensors",
|
416 |
+
"model.layers.45.self_attn.rotary_emb.inv_freq": "model-00008-of-00015.safetensors",
|
417 |
+
"model.layers.45.self_attn.v_proj.weight": "model-00008-of-00015.safetensors",
|
418 |
+
"model.layers.46.input_layernorm.weight": "model-00009-of-00015.safetensors",
|
419 |
+
"model.layers.46.mlp.down_proj.weight": "model-00009-of-00015.safetensors",
|
420 |
+
"model.layers.46.mlp.gate_proj.weight": "model-00009-of-00015.safetensors",
|
421 |
+
"model.layers.46.mlp.up_proj.weight": "model-00009-of-00015.safetensors",
|
422 |
+
"model.layers.46.post_attention_layernorm.weight": "model-00009-of-00015.safetensors",
|
423 |
+
"model.layers.46.self_attn.k_proj.weight": "model-00009-of-00015.safetensors",
|
424 |
+
"model.layers.46.self_attn.o_proj.weight": "model-00009-of-00015.safetensors",
|
425 |
+
"model.layers.46.self_attn.q_proj.weight": "model-00009-of-00015.safetensors",
|
426 |
+
"model.layers.46.self_attn.rotary_emb.inv_freq": "model-00009-of-00015.safetensors",
|
427 |
+
"model.layers.46.self_attn.v_proj.weight": "model-00009-of-00015.safetensors",
|
428 |
+
"model.layers.47.input_layernorm.weight": "model-00009-of-00015.safetensors",
|
429 |
+
"model.layers.47.mlp.down_proj.weight": "model-00009-of-00015.safetensors",
|
430 |
+
"model.layers.47.mlp.gate_proj.weight": "model-00009-of-00015.safetensors",
|
431 |
+
"model.layers.47.mlp.up_proj.weight": "model-00009-of-00015.safetensors",
|
432 |
+
"model.layers.47.post_attention_layernorm.weight": "model-00009-of-00015.safetensors",
|
433 |
+
"model.layers.47.self_attn.k_proj.weight": "model-00009-of-00015.safetensors",
|
434 |
+
"model.layers.47.self_attn.o_proj.weight": "model-00009-of-00015.safetensors",
|
435 |
+
"model.layers.47.self_attn.q_proj.weight": "model-00009-of-00015.safetensors",
|
436 |
+
"model.layers.47.self_attn.rotary_emb.inv_freq": "model-00009-of-00015.safetensors",
|
437 |
+
"model.layers.47.self_attn.v_proj.weight": "model-00009-of-00015.safetensors",
|
438 |
+
"model.layers.48.input_layernorm.weight": "model-00009-of-00015.safetensors",
|
439 |
+
"model.layers.48.mlp.down_proj.weight": "model-00009-of-00015.safetensors",
|
440 |
+
"model.layers.48.mlp.gate_proj.weight": "model-00009-of-00015.safetensors",
|
441 |
+
"model.layers.48.mlp.up_proj.weight": "model-00009-of-00015.safetensors",
|
442 |
+
"model.layers.48.post_attention_layernorm.weight": "model-00009-of-00015.safetensors",
|
443 |
+
"model.layers.48.self_attn.k_proj.weight": "model-00009-of-00015.safetensors",
|
444 |
+
"model.layers.48.self_attn.o_proj.weight": "model-00009-of-00015.safetensors",
|
445 |
+
"model.layers.48.self_attn.q_proj.weight": "model-00009-of-00015.safetensors",
|
446 |
+
"model.layers.48.self_attn.rotary_emb.inv_freq": "model-00009-of-00015.safetensors",
|
447 |
+
"model.layers.48.self_attn.v_proj.weight": "model-00009-of-00015.safetensors",
|
448 |
+
"model.layers.49.input_layernorm.weight": "model-00009-of-00015.safetensors",
|
449 |
+
"model.layers.49.mlp.down_proj.weight": "model-00009-of-00015.safetensors",
|
450 |
+
"model.layers.49.mlp.gate_proj.weight": "model-00009-of-00015.safetensors",
|
451 |
+
"model.layers.49.mlp.up_proj.weight": "model-00009-of-00015.safetensors",
|
452 |
+
"model.layers.49.post_attention_layernorm.weight": "model-00009-of-00015.safetensors",
|
453 |
+
"model.layers.49.self_attn.k_proj.weight": "model-00009-of-00015.safetensors",
|
454 |
+
"model.layers.49.self_attn.o_proj.weight": "model-00009-of-00015.safetensors",
|
455 |
+
"model.layers.49.self_attn.q_proj.weight": "model-00009-of-00015.safetensors",
|
456 |
+
"model.layers.49.self_attn.rotary_emb.inv_freq": "model-00009-of-00015.safetensors",
|
457 |
+
"model.layers.49.self_attn.v_proj.weight": "model-00009-of-00015.safetensors",
|
458 |
+
"model.layers.5.input_layernorm.weight": "model-00002-of-00015.safetensors",
|
459 |
+
"model.layers.5.mlp.down_proj.weight": "model-00002-of-00015.safetensors",
|
460 |
+
"model.layers.5.mlp.gate_proj.weight": "model-00001-of-00015.safetensors",
|
461 |
+
"model.layers.5.mlp.up_proj.weight": "model-00002-of-00015.safetensors",
|
462 |
+
"model.layers.5.post_attention_layernorm.weight": "model-00002-of-00015.safetensors",
|
463 |
+
"model.layers.5.self_attn.k_proj.weight": "model-00001-of-00015.safetensors",
|
464 |
+
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00015.safetensors",
|
465 |
+
"model.layers.5.self_attn.q_proj.weight": "model-00001-of-00015.safetensors",
|
466 |
+
"model.layers.5.self_attn.rotary_emb.inv_freq": "model-00001-of-00015.safetensors",
|
467 |
+
"model.layers.5.self_attn.v_proj.weight": "model-00001-of-00015.safetensors",
|
468 |
+
"model.layers.50.input_layernorm.weight": "model-00009-of-00015.safetensors",
|
469 |
+
"model.layers.50.mlp.down_proj.weight": "model-00009-of-00015.safetensors",
|
470 |
+
"model.layers.50.mlp.gate_proj.weight": "model-00009-of-00015.safetensors",
|
471 |
+
"model.layers.50.mlp.up_proj.weight": "model-00009-of-00015.safetensors",
|
472 |
+
"model.layers.50.post_attention_layernorm.weight": "model-00009-of-00015.safetensors",
|
473 |
+
"model.layers.50.self_attn.k_proj.weight": "model-00009-of-00015.safetensors",
|
474 |
+
"model.layers.50.self_attn.o_proj.weight": "model-00009-of-00015.safetensors",
|
475 |
+
"model.layers.50.self_attn.q_proj.weight": "model-00009-of-00015.safetensors",
|
476 |
+
"model.layers.50.self_attn.rotary_emb.inv_freq": "model-00009-of-00015.safetensors",
|
477 |
+
"model.layers.50.self_attn.v_proj.weight": "model-00009-of-00015.safetensors",
|
478 |
+
"model.layers.51.input_layernorm.weight": "model-00010-of-00015.safetensors",
|
479 |
+
"model.layers.51.mlp.down_proj.weight": "model-00010-of-00015.safetensors",
|
480 |
+
"model.layers.51.mlp.gate_proj.weight": "model-00009-of-00015.safetensors",
|
481 |
+
"model.layers.51.mlp.up_proj.weight": "model-00010-of-00015.safetensors",
|
482 |
+
"model.layers.51.post_attention_layernorm.weight": "model-00010-of-00015.safetensors",
|
483 |
+
"model.layers.51.self_attn.k_proj.weight": "model-00009-of-00015.safetensors",
|
484 |
+
"model.layers.51.self_attn.o_proj.weight": "model-00009-of-00015.safetensors",
|
485 |
+
"model.layers.51.self_attn.q_proj.weight": "model-00009-of-00015.safetensors",
|
486 |
+
"model.layers.51.self_attn.rotary_emb.inv_freq": "model-00009-of-00015.safetensors",
|
487 |
+
"model.layers.51.self_attn.v_proj.weight": "model-00009-of-00015.safetensors",
|
488 |
+
"model.layers.52.input_layernorm.weight": "model-00010-of-00015.safetensors",
|
489 |
+
"model.layers.52.mlp.down_proj.weight": "model-00010-of-00015.safetensors",
|
490 |
+
"model.layers.52.mlp.gate_proj.weight": "model-00010-of-00015.safetensors",
|
491 |
+
"model.layers.52.mlp.up_proj.weight": "model-00010-of-00015.safetensors",
|
492 |
+
"model.layers.52.post_attention_layernorm.weight": "model-00010-of-00015.safetensors",
|
493 |
+
"model.layers.52.self_attn.k_proj.weight": "model-00010-of-00015.safetensors",
|
494 |
+
"model.layers.52.self_attn.o_proj.weight": "model-00010-of-00015.safetensors",
|
495 |
+
"model.layers.52.self_attn.q_proj.weight": "model-00010-of-00015.safetensors",
|
496 |
+
"model.layers.52.self_attn.rotary_emb.inv_freq": "model-00010-of-00015.safetensors",
|
497 |
+
"model.layers.52.self_attn.v_proj.weight": "model-00010-of-00015.safetensors",
|
498 |
+
"model.layers.53.input_layernorm.weight": "model-00010-of-00015.safetensors",
|
499 |
+
"model.layers.53.mlp.down_proj.weight": "model-00010-of-00015.safetensors",
|
500 |
+
"model.layers.53.mlp.gate_proj.weight": "model-00010-of-00015.safetensors",
|
501 |
+
"model.layers.53.mlp.up_proj.weight": "model-00010-of-00015.safetensors",
|
502 |
+
"model.layers.53.post_attention_layernorm.weight": "model-00010-of-00015.safetensors",
|
503 |
+
"model.layers.53.self_attn.k_proj.weight": "model-00010-of-00015.safetensors",
|
504 |
+
"model.layers.53.self_attn.o_proj.weight": "model-00010-of-00015.safetensors",
|
505 |
+
"model.layers.53.self_attn.q_proj.weight": "model-00010-of-00015.safetensors",
|
506 |
+
"model.layers.53.self_attn.rotary_emb.inv_freq": "model-00010-of-00015.safetensors",
|
507 |
+
"model.layers.53.self_attn.v_proj.weight": "model-00010-of-00015.safetensors",
|
508 |
+
"model.layers.54.input_layernorm.weight": "model-00010-of-00015.safetensors",
|
509 |
+
"model.layers.54.mlp.down_proj.weight": "model-00010-of-00015.safetensors",
|
510 |
+
"model.layers.54.mlp.gate_proj.weight": "model-00010-of-00015.safetensors",
|
511 |
+
"model.layers.54.mlp.up_proj.weight": "model-00010-of-00015.safetensors",
|
512 |
+
"model.layers.54.post_attention_layernorm.weight": "model-00010-of-00015.safetensors",
|
513 |
+
"model.layers.54.self_attn.k_proj.weight": "model-00010-of-00015.safetensors",
|
514 |
+
"model.layers.54.self_attn.o_proj.weight": "model-00010-of-00015.safetensors",
|
515 |
+
"model.layers.54.self_attn.q_proj.weight": "model-00010-of-00015.safetensors",
|
516 |
+
"model.layers.54.self_attn.rotary_emb.inv_freq": "model-00010-of-00015.safetensors",
|
517 |
+
"model.layers.54.self_attn.v_proj.weight": "model-00010-of-00015.safetensors",
|
518 |
+
"model.layers.55.input_layernorm.weight": "model-00010-of-00015.safetensors",
|
519 |
+
"model.layers.55.mlp.down_proj.weight": "model-00010-of-00015.safetensors",
|
520 |
+
"model.layers.55.mlp.gate_proj.weight": "model-00010-of-00015.safetensors",
|
521 |
+
"model.layers.55.mlp.up_proj.weight": "model-00010-of-00015.safetensors",
|
522 |
+
"model.layers.55.post_attention_layernorm.weight": "model-00010-of-00015.safetensors",
|
523 |
+
"model.layers.55.self_attn.k_proj.weight": "model-00010-of-00015.safetensors",
|
524 |
+
"model.layers.55.self_attn.o_proj.weight": "model-00010-of-00015.safetensors",
|
525 |
+
"model.layers.55.self_attn.q_proj.weight": "model-00010-of-00015.safetensors",
|
526 |
+
"model.layers.55.self_attn.rotary_emb.inv_freq": "model-00010-of-00015.safetensors",
|
527 |
+
"model.layers.55.self_attn.v_proj.weight": "model-00010-of-00015.safetensors",
|
528 |
+
"model.layers.56.input_layernorm.weight": "model-00010-of-00015.safetensors",
|
529 |
+
"model.layers.56.mlp.down_proj.weight": "model-00010-of-00015.safetensors",
|
530 |
+
"model.layers.56.mlp.gate_proj.weight": "model-00010-of-00015.safetensors",
|
531 |
+
"model.layers.56.mlp.up_proj.weight": "model-00010-of-00015.safetensors",
|
532 |
+
"model.layers.56.post_attention_layernorm.weight": "model-00010-of-00015.safetensors",
|
533 |
+
"model.layers.56.self_attn.k_proj.weight": "model-00010-of-00015.safetensors",
|
534 |
+
"model.layers.56.self_attn.o_proj.weight": "model-00010-of-00015.safetensors",
|
535 |
+
"model.layers.56.self_attn.q_proj.weight": "model-00010-of-00015.safetensors",
|
536 |
+
"model.layers.56.self_attn.rotary_emb.inv_freq": "model-00010-of-00015.safetensors",
|
537 |
+
"model.layers.56.self_attn.v_proj.weight": "model-00010-of-00015.safetensors",
|
538 |
+
"model.layers.57.input_layernorm.weight": "model-00011-of-00015.safetensors",
|
539 |
+
"model.layers.57.mlp.down_proj.weight": "model-00011-of-00015.safetensors",
|
540 |
+
"model.layers.57.mlp.gate_proj.weight": "model-00011-of-00015.safetensors",
|
541 |
+
"model.layers.57.mlp.up_proj.weight": "model-00011-of-00015.safetensors",
|
542 |
+
"model.layers.57.post_attention_layernorm.weight": "model-00011-of-00015.safetensors",
|
543 |
+
"model.layers.57.self_attn.k_proj.weight": "model-00010-of-00015.safetensors",
|
544 |
+
"model.layers.57.self_attn.o_proj.weight": "model-00010-of-00015.safetensors",
|
545 |
+
"model.layers.57.self_attn.q_proj.weight": "model-00010-of-00015.safetensors",
|
546 |
+
"model.layers.57.self_attn.rotary_emb.inv_freq": "model-00010-of-00015.safetensors",
|
547 |
+
"model.layers.57.self_attn.v_proj.weight": "model-00010-of-00015.safetensors",
|
548 |
+
"model.layers.58.input_layernorm.weight": "model-00011-of-00015.safetensors",
|
549 |
+
"model.layers.58.mlp.down_proj.weight": "model-00011-of-00015.safetensors",
|
550 |
+
"model.layers.58.mlp.gate_proj.weight": "model-00011-of-00015.safetensors",
|
551 |
+
"model.layers.58.mlp.up_proj.weight": "model-00011-of-00015.safetensors",
|
552 |
+
"model.layers.58.post_attention_layernorm.weight": "model-00011-of-00015.safetensors",
|
553 |
+
"model.layers.58.self_attn.k_proj.weight": "model-00011-of-00015.safetensors",
|
554 |
+
"model.layers.58.self_attn.o_proj.weight": "model-00011-of-00015.safetensors",
|
555 |
+
"model.layers.58.self_attn.q_proj.weight": "model-00011-of-00015.safetensors",
|
556 |
+
"model.layers.58.self_attn.rotary_emb.inv_freq": "model-00011-of-00015.safetensors",
|
557 |
+
"model.layers.58.self_attn.v_proj.weight": "model-00011-of-00015.safetensors",
|
558 |
+
"model.layers.59.input_layernorm.weight": "model-00011-of-00015.safetensors",
|
559 |
+
"model.layers.59.mlp.down_proj.weight": "model-00011-of-00015.safetensors",
|
560 |
+
"model.layers.59.mlp.gate_proj.weight": "model-00011-of-00015.safetensors",
|
561 |
+
"model.layers.59.mlp.up_proj.weight": "model-00011-of-00015.safetensors",
|
562 |
+
"model.layers.59.post_attention_layernorm.weight": "model-00011-of-00015.safetensors",
|
563 |
+
"model.layers.59.self_attn.k_proj.weight": "model-00011-of-00015.safetensors",
|
564 |
+
"model.layers.59.self_attn.o_proj.weight": "model-00011-of-00015.safetensors",
|
565 |
+
"model.layers.59.self_attn.q_proj.weight": "model-00011-of-00015.safetensors",
|
566 |
+
"model.layers.59.self_attn.rotary_emb.inv_freq": "model-00011-of-00015.safetensors",
|
567 |
+
"model.layers.59.self_attn.v_proj.weight": "model-00011-of-00015.safetensors",
|
568 |
+
"model.layers.6.input_layernorm.weight": "model-00002-of-00015.safetensors",
|
569 |
+
"model.layers.6.mlp.down_proj.weight": "model-00002-of-00015.safetensors",
|
570 |
+
"model.layers.6.mlp.gate_proj.weight": "model-00002-of-00015.safetensors",
|
571 |
+
"model.layers.6.mlp.up_proj.weight": "model-00002-of-00015.safetensors",
|
572 |
+
"model.layers.6.post_attention_layernorm.weight": "model-00002-of-00015.safetensors",
|
573 |
+
"model.layers.6.self_attn.k_proj.weight": "model-00002-of-00015.safetensors",
|
574 |
+
"model.layers.6.self_attn.o_proj.weight": "model-00002-of-00015.safetensors",
|
575 |
+
"model.layers.6.self_attn.q_proj.weight": "model-00002-of-00015.safetensors",
|
576 |
+
"model.layers.6.self_attn.rotary_emb.inv_freq": "model-00002-of-00015.safetensors",
|
577 |
+
"model.layers.6.self_attn.v_proj.weight": "model-00002-of-00015.safetensors",
|
578 |
+
"model.layers.60.input_layernorm.weight": "model-00011-of-00015.safetensors",
|
579 |
+
"model.layers.60.mlp.down_proj.weight": "model-00011-of-00015.safetensors",
|
580 |
+
"model.layers.60.mlp.gate_proj.weight": "model-00011-of-00015.safetensors",
|
581 |
+
"model.layers.60.mlp.up_proj.weight": "model-00011-of-00015.safetensors",
|
582 |
+
"model.layers.60.post_attention_layernorm.weight": "model-00011-of-00015.safetensors",
|
583 |
+
"model.layers.60.self_attn.k_proj.weight": "model-00011-of-00015.safetensors",
|
584 |
+
"model.layers.60.self_attn.o_proj.weight": "model-00011-of-00015.safetensors",
|
585 |
+
"model.layers.60.self_attn.q_proj.weight": "model-00011-of-00015.safetensors",
|
586 |
+
"model.layers.60.self_attn.rotary_emb.inv_freq": "model-00011-of-00015.safetensors",
|
587 |
+
"model.layers.60.self_attn.v_proj.weight": "model-00011-of-00015.safetensors",
|
588 |
+
"model.layers.61.input_layernorm.weight": "model-00011-of-00015.safetensors",
|
589 |
+
"model.layers.61.mlp.down_proj.weight": "model-00011-of-00015.safetensors",
|
590 |
+
"model.layers.61.mlp.gate_proj.weight": "model-00011-of-00015.safetensors",
|
591 |
+
"model.layers.61.mlp.up_proj.weight": "model-00011-of-00015.safetensors",
|
592 |
+
"model.layers.61.post_attention_layernorm.weight": "model-00011-of-00015.safetensors",
|
593 |
+
"model.layers.61.self_attn.k_proj.weight": "model-00011-of-00015.safetensors",
|
594 |
+
"model.layers.61.self_attn.o_proj.weight": "model-00011-of-00015.safetensors",
|
595 |
+
"model.layers.61.self_attn.q_proj.weight": "model-00011-of-00015.safetensors",
|
596 |
+
"model.layers.61.self_attn.rotary_emb.inv_freq": "model-00011-of-00015.safetensors",
|
597 |
+
"model.layers.61.self_attn.v_proj.weight": "model-00011-of-00015.safetensors",
|
598 |
+
"model.layers.62.input_layernorm.weight": "model-00011-of-00015.safetensors",
|
599 |
+
"model.layers.62.mlp.down_proj.weight": "model-00011-of-00015.safetensors",
|
600 |
+
"model.layers.62.mlp.gate_proj.weight": "model-00011-of-00015.safetensors",
|
601 |
+
"model.layers.62.mlp.up_proj.weight": "model-00011-of-00015.safetensors",
|
602 |
+
"model.layers.62.post_attention_layernorm.weight": "model-00011-of-00015.safetensors",
|
603 |
+
"model.layers.62.self_attn.k_proj.weight": "model-00011-of-00015.safetensors",
|
604 |
+
"model.layers.62.self_attn.o_proj.weight": "model-00011-of-00015.safetensors",
|
605 |
+
"model.layers.62.self_attn.q_proj.weight": "model-00011-of-00015.safetensors",
|
606 |
+
"model.layers.62.self_attn.rotary_emb.inv_freq": "model-00011-of-00015.safetensors",
|
607 |
+
"model.layers.62.self_attn.v_proj.weight": "model-00011-of-00015.safetensors",
|
608 |
+
"model.layers.63.input_layernorm.weight": "model-00012-of-00015.safetensors",
|
609 |
+
"model.layers.63.mlp.down_proj.weight": "model-00012-of-00015.safetensors",
|
610 |
+
"model.layers.63.mlp.gate_proj.weight": "model-00012-of-00015.safetensors",
|
611 |
+
"model.layers.63.mlp.up_proj.weight": "model-00012-of-00015.safetensors",
|
612 |
+
"model.layers.63.post_attention_layernorm.weight": "model-00012-of-00015.safetensors",
|
613 |
+
"model.layers.63.self_attn.k_proj.weight": "model-00012-of-00015.safetensors",
|
614 |
+
"model.layers.63.self_attn.o_proj.weight": "model-00012-of-00015.safetensors",
|
615 |
+
"model.layers.63.self_attn.q_proj.weight": "model-00012-of-00015.safetensors",
|
616 |
+
"model.layers.63.self_attn.rotary_emb.inv_freq": "model-00012-of-00015.safetensors",
|
617 |
+
"model.layers.63.self_attn.v_proj.weight": "model-00012-of-00015.safetensors",
|
618 |
+
"model.layers.64.input_layernorm.weight": "model-00012-of-00015.safetensors",
|
619 |
+
"model.layers.64.mlp.down_proj.weight": "model-00012-of-00015.safetensors",
|
620 |
+
"model.layers.64.mlp.gate_proj.weight": "model-00012-of-00015.safetensors",
|
621 |
+
"model.layers.64.mlp.up_proj.weight": "model-00012-of-00015.safetensors",
|
622 |
+
"model.layers.64.post_attention_layernorm.weight": "model-00012-of-00015.safetensors",
|
623 |
+
"model.layers.64.self_attn.k_proj.weight": "model-00012-of-00015.safetensors",
|
624 |
+
"model.layers.64.self_attn.o_proj.weight": "model-00012-of-00015.safetensors",
|
625 |
+
"model.layers.64.self_attn.q_proj.weight": "model-00012-of-00015.safetensors",
|
626 |
+
"model.layers.64.self_attn.rotary_emb.inv_freq": "model-00012-of-00015.safetensors",
|
627 |
+
"model.layers.64.self_attn.v_proj.weight": "model-00012-of-00015.safetensors",
|
628 |
+
"model.layers.65.input_layernorm.weight": "model-00012-of-00015.safetensors",
|
629 |
+
"model.layers.65.mlp.down_proj.weight": "model-00012-of-00015.safetensors",
|
630 |
+
"model.layers.65.mlp.gate_proj.weight": "model-00012-of-00015.safetensors",
|
631 |
+
"model.layers.65.mlp.up_proj.weight": "model-00012-of-00015.safetensors",
|
632 |
+
"model.layers.65.post_attention_layernorm.weight": "model-00012-of-00015.safetensors",
|
633 |
+
"model.layers.65.self_attn.k_proj.weight": "model-00012-of-00015.safetensors",
|
634 |
+
"model.layers.65.self_attn.o_proj.weight": "model-00012-of-00015.safetensors",
|
635 |
+
"model.layers.65.self_attn.q_proj.weight": "model-00012-of-00015.safetensors",
|
636 |
+
"model.layers.65.self_attn.rotary_emb.inv_freq": "model-00012-of-00015.safetensors",
|
637 |
+
"model.layers.65.self_attn.v_proj.weight": "model-00012-of-00015.safetensors",
|
638 |
+
"model.layers.66.input_layernorm.weight": "model-00012-of-00015.safetensors",
|
639 |
+
"model.layers.66.mlp.down_proj.weight": "model-00012-of-00015.safetensors",
|
640 |
+
"model.layers.66.mlp.gate_proj.weight": "model-00012-of-00015.safetensors",
|
641 |
+
"model.layers.66.mlp.up_proj.weight": "model-00012-of-00015.safetensors",
|
642 |
+
"model.layers.66.post_attention_layernorm.weight": "model-00012-of-00015.safetensors",
|
643 |
+
"model.layers.66.self_attn.k_proj.weight": "model-00012-of-00015.safetensors",
|
644 |
+
"model.layers.66.self_attn.o_proj.weight": "model-00012-of-00015.safetensors",
|
645 |
+
"model.layers.66.self_attn.q_proj.weight": "model-00012-of-00015.safetensors",
|
646 |
+
"model.layers.66.self_attn.rotary_emb.inv_freq": "model-00012-of-00015.safetensors",
|
647 |
+
"model.layers.66.self_attn.v_proj.weight": "model-00012-of-00015.safetensors",
|
648 |
+
"model.layers.67.input_layernorm.weight": "model-00012-of-00015.safetensors",
|
649 |
+
"model.layers.67.mlp.down_proj.weight": "model-00012-of-00015.safetensors",
|
650 |
+
"model.layers.67.mlp.gate_proj.weight": "model-00012-of-00015.safetensors",
|
651 |
+
"model.layers.67.mlp.up_proj.weight": "model-00012-of-00015.safetensors",
|
652 |
+
"model.layers.67.post_attention_layernorm.weight": "model-00012-of-00015.safetensors",
|
653 |
+
"model.layers.67.self_attn.k_proj.weight": "model-00012-of-00015.safetensors",
|
654 |
+
"model.layers.67.self_attn.o_proj.weight": "model-00012-of-00015.safetensors",
|
655 |
+
"model.layers.67.self_attn.q_proj.weight": "model-00012-of-00015.safetensors",
|
656 |
+
"model.layers.67.self_attn.rotary_emb.inv_freq": "model-00012-of-00015.safetensors",
|
657 |
+
"model.layers.67.self_attn.v_proj.weight": "model-00012-of-00015.safetensors",
|
658 |
+
"model.layers.68.input_layernorm.weight": "model-00013-of-00015.safetensors",
|
659 |
+
"model.layers.68.mlp.down_proj.weight": "model-00012-of-00015.safetensors",
|
660 |
+
"model.layers.68.mlp.gate_proj.weight": "model-00012-of-00015.safetensors",
|
661 |
+
"model.layers.68.mlp.up_proj.weight": "model-00013-of-00015.safetensors",
|
662 |
+
"model.layers.68.post_attention_layernorm.weight": "model-00013-of-00015.safetensors",
|
663 |
+
"model.layers.68.self_attn.k_proj.weight": "model-00012-of-00015.safetensors",
|
664 |
+
"model.layers.68.self_attn.o_proj.weight": "model-00012-of-00015.safetensors",
|
665 |
+
"model.layers.68.self_attn.q_proj.weight": "model-00012-of-00015.safetensors",
|
666 |
+
"model.layers.68.self_attn.rotary_emb.inv_freq": "model-00012-of-00015.safetensors",
|
667 |
+
"model.layers.68.self_attn.v_proj.weight": "model-00012-of-00015.safetensors",
|
668 |
+
"model.layers.69.input_layernorm.weight": "model-00013-of-00015.safetensors",
|
669 |
+
"model.layers.69.mlp.down_proj.weight": "model-00013-of-00015.safetensors",
|
670 |
+
"model.layers.69.mlp.gate_proj.weight": "model-00013-of-00015.safetensors",
|
671 |
+
"model.layers.69.mlp.up_proj.weight": "model-00013-of-00015.safetensors",
|
672 |
+
"model.layers.69.post_attention_layernorm.weight": "model-00013-of-00015.safetensors",
|
673 |
+
"model.layers.69.self_attn.k_proj.weight": "model-00013-of-00015.safetensors",
|
674 |
+
"model.layers.69.self_attn.o_proj.weight": "model-00013-of-00015.safetensors",
|
675 |
+
"model.layers.69.self_attn.q_proj.weight": "model-00013-of-00015.safetensors",
|
676 |
+
"model.layers.69.self_attn.rotary_emb.inv_freq": "model-00013-of-00015.safetensors",
|
677 |
+
"model.layers.69.self_attn.v_proj.weight": "model-00013-of-00015.safetensors",
|
678 |
+
"model.layers.7.input_layernorm.weight": "model-00002-of-00015.safetensors",
|
679 |
+
"model.layers.7.mlp.down_proj.weight": "model-00002-of-00015.safetensors",
|
680 |
+
"model.layers.7.mlp.gate_proj.weight": "model-00002-of-00015.safetensors",
|
681 |
+
"model.layers.7.mlp.up_proj.weight": "model-00002-of-00015.safetensors",
|
682 |
+
"model.layers.7.post_attention_layernorm.weight": "model-00002-of-00015.safetensors",
|
683 |
+
"model.layers.7.self_attn.k_proj.weight": "model-00002-of-00015.safetensors",
|
684 |
+
"model.layers.7.self_attn.o_proj.weight": "model-00002-of-00015.safetensors",
|
685 |
+
"model.layers.7.self_attn.q_proj.weight": "model-00002-of-00015.safetensors",
|
686 |
+
"model.layers.7.self_attn.rotary_emb.inv_freq": "model-00002-of-00015.safetensors",
|
687 |
+
"model.layers.7.self_attn.v_proj.weight": "model-00002-of-00015.safetensors",
|
688 |
+
"model.layers.70.input_layernorm.weight": "model-00013-of-00015.safetensors",
|
689 |
+
"model.layers.70.mlp.down_proj.weight": "model-00013-of-00015.safetensors",
|
690 |
+
"model.layers.70.mlp.gate_proj.weight": "model-00013-of-00015.safetensors",
|
691 |
+
"model.layers.70.mlp.up_proj.weight": "model-00013-of-00015.safetensors",
|
692 |
+
"model.layers.70.post_attention_layernorm.weight": "model-00013-of-00015.safetensors",
|
693 |
+
"model.layers.70.self_attn.k_proj.weight": "model-00013-of-00015.safetensors",
|
694 |
+
"model.layers.70.self_attn.o_proj.weight": "model-00013-of-00015.safetensors",
|
695 |
+
"model.layers.70.self_attn.q_proj.weight": "model-00013-of-00015.safetensors",
|
696 |
+
"model.layers.70.self_attn.rotary_emb.inv_freq": "model-00013-of-00015.safetensors",
|
697 |
+
"model.layers.70.self_attn.v_proj.weight": "model-00013-of-00015.safetensors",
|
698 |
+
"model.layers.71.input_layernorm.weight": "model-00013-of-00015.safetensors",
|
699 |
+
"model.layers.71.mlp.down_proj.weight": "model-00013-of-00015.safetensors",
|
700 |
+
"model.layers.71.mlp.gate_proj.weight": "model-00013-of-00015.safetensors",
|
701 |
+
"model.layers.71.mlp.up_proj.weight": "model-00013-of-00015.safetensors",
|
702 |
+
"model.layers.71.post_attention_layernorm.weight": "model-00013-of-00015.safetensors",
|
703 |
+
"model.layers.71.self_attn.k_proj.weight": "model-00013-of-00015.safetensors",
|
704 |
+
"model.layers.71.self_attn.o_proj.weight": "model-00013-of-00015.safetensors",
|
705 |
+
"model.layers.71.self_attn.q_proj.weight": "model-00013-of-00015.safetensors",
|
706 |
+
"model.layers.71.self_attn.rotary_emb.inv_freq": "model-00013-of-00015.safetensors",
|
707 |
+
"model.layers.71.self_attn.v_proj.weight": "model-00013-of-00015.safetensors",
|
708 |
+
"model.layers.72.input_layernorm.weight": "model-00013-of-00015.safetensors",
|
709 |
+
"model.layers.72.mlp.down_proj.weight": "model-00013-of-00015.safetensors",
|
710 |
+
"model.layers.72.mlp.gate_proj.weight": "model-00013-of-00015.safetensors",
|
711 |
+
"model.layers.72.mlp.up_proj.weight": "model-00013-of-00015.safetensors",
|
712 |
+
"model.layers.72.post_attention_layernorm.weight": "model-00013-of-00015.safetensors",
|
713 |
+
"model.layers.72.self_attn.k_proj.weight": "model-00013-of-00015.safetensors",
|
714 |
+
"model.layers.72.self_attn.o_proj.weight": "model-00013-of-00015.safetensors",
|
715 |
+
"model.layers.72.self_attn.q_proj.weight": "model-00013-of-00015.safetensors",
|
716 |
+
"model.layers.72.self_attn.rotary_emb.inv_freq": "model-00013-of-00015.safetensors",
|
717 |
+
"model.layers.72.self_attn.v_proj.weight": "model-00013-of-00015.safetensors",
|
718 |
+
"model.layers.73.input_layernorm.weight": "model-00013-of-00015.safetensors",
|
719 |
+
"model.layers.73.mlp.down_proj.weight": "model-00013-of-00015.safetensors",
|
720 |
+
"model.layers.73.mlp.gate_proj.weight": "model-00013-of-00015.safetensors",
|
721 |
+
"model.layers.73.mlp.up_proj.weight": "model-00013-of-00015.safetensors",
|
722 |
+
"model.layers.73.post_attention_layernorm.weight": "model-00013-of-00015.safetensors",
|
723 |
+
"model.layers.73.self_attn.k_proj.weight": "model-00013-of-00015.safetensors",
|
724 |
+
"model.layers.73.self_attn.o_proj.weight": "model-00013-of-00015.safetensors",
|
725 |
+
"model.layers.73.self_attn.q_proj.weight": "model-00013-of-00015.safetensors",
|
726 |
+
"model.layers.73.self_attn.rotary_emb.inv_freq": "model-00013-of-00015.safetensors",
|
727 |
+
"model.layers.73.self_attn.v_proj.weight": "model-00013-of-00015.safetensors",
|
728 |
+
"model.layers.74.input_layernorm.weight": "model-00014-of-00015.safetensors",
|
729 |
+
"model.layers.74.mlp.down_proj.weight": "model-00014-of-00015.safetensors",
|
730 |
+
"model.layers.74.mlp.gate_proj.weight": "model-00013-of-00015.safetensors",
|
731 |
+
"model.layers.74.mlp.up_proj.weight": "model-00014-of-00015.safetensors",
|
732 |
+
"model.layers.74.post_attention_layernorm.weight": "model-00014-of-00015.safetensors",
|
733 |
+
"model.layers.74.self_attn.k_proj.weight": "model-00013-of-00015.safetensors",
|
734 |
+
"model.layers.74.self_attn.o_proj.weight": "model-00013-of-00015.safetensors",
|
735 |
+
"model.layers.74.self_attn.q_proj.weight": "model-00013-of-00015.safetensors",
|
736 |
+
"model.layers.74.self_attn.rotary_emb.inv_freq": "model-00013-of-00015.safetensors",
|
737 |
+
"model.layers.74.self_attn.v_proj.weight": "model-00013-of-00015.safetensors",
|
738 |
+
"model.layers.75.input_layernorm.weight": "model-00014-of-00015.safetensors",
|
739 |
+
"model.layers.75.mlp.down_proj.weight": "model-00014-of-00015.safetensors",
|
740 |
+
"model.layers.75.mlp.gate_proj.weight": "model-00014-of-00015.safetensors",
|
741 |
+
"model.layers.75.mlp.up_proj.weight": "model-00014-of-00015.safetensors",
|
742 |
+
"model.layers.75.post_attention_layernorm.weight": "model-00014-of-00015.safetensors",
|
743 |
+
"model.layers.75.self_attn.k_proj.weight": "model-00014-of-00015.safetensors",
|
744 |
+
"model.layers.75.self_attn.o_proj.weight": "model-00014-of-00015.safetensors",
|
745 |
+
"model.layers.75.self_attn.q_proj.weight": "model-00014-of-00015.safetensors",
|
746 |
+
"model.layers.75.self_attn.rotary_emb.inv_freq": "model-00014-of-00015.safetensors",
|
747 |
+
"model.layers.75.self_attn.v_proj.weight": "model-00014-of-00015.safetensors",
|
748 |
+
"model.layers.76.input_layernorm.weight": "model-00014-of-00015.safetensors",
|
749 |
+
"model.layers.76.mlp.down_proj.weight": "model-00014-of-00015.safetensors",
|
750 |
+
"model.layers.76.mlp.gate_proj.weight": "model-00014-of-00015.safetensors",
|
751 |
+
"model.layers.76.mlp.up_proj.weight": "model-00014-of-00015.safetensors",
|
752 |
+
"model.layers.76.post_attention_layernorm.weight": "model-00014-of-00015.safetensors",
|
753 |
+
"model.layers.76.self_attn.k_proj.weight": "model-00014-of-00015.safetensors",
|
754 |
+
"model.layers.76.self_attn.o_proj.weight": "model-00014-of-00015.safetensors",
|
755 |
+
"model.layers.76.self_attn.q_proj.weight": "model-00014-of-00015.safetensors",
|
756 |
+
"model.layers.76.self_attn.rotary_emb.inv_freq": "model-00014-of-00015.safetensors",
|
757 |
+
"model.layers.76.self_attn.v_proj.weight": "model-00014-of-00015.safetensors",
|
758 |
+
"model.layers.77.input_layernorm.weight": "model-00014-of-00015.safetensors",
|
759 |
+
"model.layers.77.mlp.down_proj.weight": "model-00014-of-00015.safetensors",
|
760 |
+
"model.layers.77.mlp.gate_proj.weight": "model-00014-of-00015.safetensors",
|
761 |
+
"model.layers.77.mlp.up_proj.weight": "model-00014-of-00015.safetensors",
|
762 |
+
"model.layers.77.post_attention_layernorm.weight": "model-00014-of-00015.safetensors",
|
763 |
+
"model.layers.77.self_attn.k_proj.weight": "model-00014-of-00015.safetensors",
|
764 |
+
"model.layers.77.self_attn.o_proj.weight": "model-00014-of-00015.safetensors",
|
765 |
+
"model.layers.77.self_attn.q_proj.weight": "model-00014-of-00015.safetensors",
|
766 |
+
"model.layers.77.self_attn.rotary_emb.inv_freq": "model-00014-of-00015.safetensors",
|
767 |
+
"model.layers.77.self_attn.v_proj.weight": "model-00014-of-00015.safetensors",
|
768 |
+
"model.layers.78.input_layernorm.weight": "model-00014-of-00015.safetensors",
|
769 |
+
"model.layers.78.mlp.down_proj.weight": "model-00014-of-00015.safetensors",
|
770 |
+
"model.layers.78.mlp.gate_proj.weight": "model-00014-of-00015.safetensors",
|
771 |
+
"model.layers.78.mlp.up_proj.weight": "model-00014-of-00015.safetensors",
|
772 |
+
"model.layers.78.post_attention_layernorm.weight": "model-00014-of-00015.safetensors",
|
773 |
+
"model.layers.78.self_attn.k_proj.weight": "model-00014-of-00015.safetensors",
|
774 |
+
"model.layers.78.self_attn.o_proj.weight": "model-00014-of-00015.safetensors",
|
775 |
+
"model.layers.78.self_attn.q_proj.weight": "model-00014-of-00015.safetensors",
|
776 |
+
"model.layers.78.self_attn.rotary_emb.inv_freq": "model-00014-of-00015.safetensors",
|
777 |
+
"model.layers.78.self_attn.v_proj.weight": "model-00014-of-00015.safetensors",
|
778 |
+
"model.layers.79.input_layernorm.weight": "model-00014-of-00015.safetensors",
|
779 |
+
"model.layers.79.mlp.down_proj.weight": "model-00014-of-00015.safetensors",
|
780 |
+
"model.layers.79.mlp.gate_proj.weight": "model-00014-of-00015.safetensors",
|
781 |
+
"model.layers.79.mlp.up_proj.weight": "model-00014-of-00015.safetensors",
|
782 |
+
"model.layers.79.post_attention_layernorm.weight": "model-00014-of-00015.safetensors",
|
783 |
+
"model.layers.79.self_attn.k_proj.weight": "model-00014-of-00015.safetensors",
|
784 |
+
"model.layers.79.self_attn.o_proj.weight": "model-00014-of-00015.safetensors",
|
785 |
+
"model.layers.79.self_attn.q_proj.weight": "model-00014-of-00015.safetensors",
|
786 |
+
"model.layers.79.self_attn.rotary_emb.inv_freq": "model-00014-of-00015.safetensors",
|
787 |
+
"model.layers.79.self_attn.v_proj.weight": "model-00014-of-00015.safetensors",
|
788 |
+
"model.layers.8.input_layernorm.weight": "model-00002-of-00015.safetensors",
|
789 |
+
"model.layers.8.mlp.down_proj.weight": "model-00002-of-00015.safetensors",
|
790 |
+
"model.layers.8.mlp.gate_proj.weight": "model-00002-of-00015.safetensors",
|
791 |
+
"model.layers.8.mlp.up_proj.weight": "model-00002-of-00015.safetensors",
|
792 |
+
"model.layers.8.post_attention_layernorm.weight": "model-00002-of-00015.safetensors",
|
793 |
+
"model.layers.8.self_attn.k_proj.weight": "model-00002-of-00015.safetensors",
|
794 |
+
"model.layers.8.self_attn.o_proj.weight": "model-00002-of-00015.safetensors",
|
795 |
+
"model.layers.8.self_attn.q_proj.weight": "model-00002-of-00015.safetensors",
|
796 |
+
"model.layers.8.self_attn.rotary_emb.inv_freq": "model-00002-of-00015.safetensors",
|
797 |
+
"model.layers.8.self_attn.v_proj.weight": "model-00002-of-00015.safetensors",
|
798 |
+
"model.layers.9.input_layernorm.weight": "model-00002-of-00015.safetensors",
|
799 |
+
"model.layers.9.mlp.down_proj.weight": "model-00002-of-00015.safetensors",
|
800 |
+
"model.layers.9.mlp.gate_proj.weight": "model-00002-of-00015.safetensors",
|
801 |
+
"model.layers.9.mlp.up_proj.weight": "model-00002-of-00015.safetensors",
|
802 |
+
"model.layers.9.post_attention_layernorm.weight": "model-00002-of-00015.safetensors",
|
803 |
+
"model.layers.9.self_attn.k_proj.weight": "model-00002-of-00015.safetensors",
|
804 |
+
"model.layers.9.self_attn.o_proj.weight": "model-00002-of-00015.safetensors",
|
805 |
+
"model.layers.9.self_attn.q_proj.weight": "model-00002-of-00015.safetensors",
|
806 |
+
"model.layers.9.self_attn.rotary_emb.inv_freq": "model-00002-of-00015.safetensors",
|
807 |
+
"model.layers.9.self_attn.v_proj.weight": "model-00002-of-00015.safetensors",
|
808 |
+
"model.norm.weight": "model-00014-of-00015.safetensors"
|
809 |
+
}
|
810 |
+
}
|
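Read as a whole, this weight_map is what a loader follows to reassemble the checkpoint: every tensor name points at exactly one shard file. The snippet below is a minimal sketch of that resolution step, not part of this repository's files; it assumes the index and the shard files sit together in the current directory and relies only on the safetensors Python API.

import json
from safetensors.torch import load_file

# Read the index and collect the shard files it references.
# (The local file name is an assumption for illustration.)
with open("model.safetensors.index.json") as f:
    index = json.load(f)
weight_map = index["weight_map"]               # tensor name -> shard file name
shard_files = sorted(set(weight_map.values()))

# Load each shard once and merge its tensors into a single state dict.
state_dict = {}
for shard in shard_files:
    state_dict.update(load_file(shard))

# Every tensor named in the index should now be present.
missing = [name for name in weight_map if name not in state_dict]
assert not missing, f"tensors listed in the index but not loaded: {missing[:5]}"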
modeling_dropped_llama.py
ADDED
@@ -0,0 +1,1338 @@
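The 1,338 lines added here presumably define the repository's custom model classes; in day-to-day use they are not imported by hand. A hedged usage sketch follows, assuming the config registers the custom classes via auto_map and that the checkpoint is available at a placeholder local path.

from transformers import AutoModelForCausalLM, AutoTokenizer

# "path/to/Llama2-70b-dropped" is a placeholder, not a real repo id.
# trust_remote_code=True lets transformers import the custom code in
# modeling_dropped_llama.py that ships alongside the weights.
tokenizer = AutoTokenizer.from_pretrained("path/to/Llama2-70b-dropped")
model = AutoModelForCausalLM.from_pretrained(
    "path/to/Llama2-70b-dropped",
    trust_remote_code=True,
    torch_dtype="auto",
)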
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
|
5 |
+
# and OPT implementations in this library. It has been modified from its
|
6 |
+
# original forms to accommodate minor architectural differences compared
|
7 |
+
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
|
8 |
+
#
|
9 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
10 |
+
# you may not use this file except in compliance with the License.
|
11 |
+
# You may obtain a copy of the License at
|
12 |
+
#
|
13 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
14 |
+
#
|
15 |
+
# Unless required by applicable law or agreed to in writing, software
|
16 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
17 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
18 |
+
# See the License for the specific language governing permissions and
|
19 |
+
# limitations under the License.
|
20 |
+
""" transformers==4.38.1"""
|
21 |
+
""" PyTorch LLaMA model."""
|
22 |
+
import math
|
23 |
+
import warnings
|
24 |
+
from typing import List, Optional, Tuple, Union
|
25 |
+
|
26 |
+
import torch
|
27 |
+
import torch.nn.functional as F
|
28 |
+
import torch.utils.checkpoint
|
29 |
+
from torch import nn
|
30 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
31 |
+
|
32 |
+
from transformers.activations import ACT2FN
|
33 |
+
from transformers.cache_utils import Cache, DynamicCache, StaticCache
|
34 |
+
from transformers.modeling_outputs import (
|
35 |
+
BaseModelOutputWithPast,
|
36 |
+
CausalLMOutputWithPast,
|
37 |
+
QuestionAnsweringModelOutput,
|
38 |
+
SequenceClassifierOutputWithPast,
|
39 |
+
)
|
40 |
+
from transformers.modeling_utils import PreTrainedModel
|
41 |
+
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
|
42 |
+
from transformers.utils import (
|
43 |
+
add_start_docstrings,
|
44 |
+
add_start_docstrings_to_model_forward,
|
45 |
+
is_flash_attn_2_available,
|
46 |
+
is_flash_attn_greater_or_equal_2_10,
|
47 |
+
logging,
|
48 |
+
replace_return_docstrings,
|
49 |
+
)
|
50 |
+
from .configuration_dropped_llama import LlamaConfig
|
51 |
+
|
52 |
+
|
53 |
+
# if is_flash_attn_2_available():
|
54 |
+
# from flash_attn import flash_attn_func, flash_attn_varlen_func
|
55 |
+
# from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
|
56 |
+
|
57 |
+
|
58 |
+
logger = logging.get_logger(__name__)
|
59 |
+
|
60 |
+
_CONFIG_FOR_DOC = "LlamaConfig"
|
61 |
+
|
62 |
+
|
63 |
+
def _get_unpad_data(attention_mask):
|
64 |
+
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
65 |
+
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
66 |
+
max_seqlen_in_batch = seqlens_in_batch.max().item()
|
67 |
+
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
|
68 |
+
return (
|
69 |
+
indices,
|
70 |
+
cu_seqlens,
|
71 |
+
max_seqlen_in_batch,
|
72 |
+
)
|
73 |
+
|
74 |
+
|
75 |
+
class LlamaRMSNorm(nn.Module):
|
76 |
+
def __init__(self, hidden_size, eps=1e-6):
|
77 |
+
"""
|
78 |
+
LlamaRMSNorm is equivalent to T5LayerNorm
|
79 |
+
"""
|
80 |
+
super().__init__()
|
81 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
82 |
+
self.variance_epsilon = eps
|
83 |
+
|
84 |
+
def forward(self, hidden_states):
|
85 |
+
input_dtype = hidden_states.dtype
|
86 |
+
hidden_states = hidden_states.to(torch.float32)
|
87 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
88 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
89 |
+
return self.weight * hidden_states.to(input_dtype)
|
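A minimal standalone check of the normalization above, assuming nothing beyond PyTorch: with the default all-ones weight, the module output matches the explicit RMS formula x / sqrt(mean(x²) + eps).

```python
# Sketch only: verify LlamaRMSNorm (defined above) against the explicit formula.
import torch

norm = LlamaRMSNorm(hidden_size=8, eps=1e-6)           # weight initialised to ones
x = torch.randn(2, 4, 8)
expected = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
assert torch.allclose(norm(x), expected, atol=1e-6)
```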
90 |
+
|
91 |
+
|
92 |
+
ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm)
|
93 |
+
|
94 |
+
|
95 |
+
class LlamaRotaryEmbedding(nn.Module):
|
96 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
|
97 |
+
super().__init__()
|
98 |
+
self.dim = dim
|
99 |
+
self.max_position_embeddings = max_position_embeddings
|
100 |
+
self.base = base
|
101 |
+
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
|
102 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
103 |
+
|
104 |
+
@property
|
105 |
+
def sin_cached(self):
|
106 |
+
logger.warning_once(
|
107 |
+
"The sin_cached attribute will be removed in 4.40. Bear in mind that its contents changed in v4.38. Use "
|
108 |
+
"the forward method of RoPE from now on instead."
|
109 |
+
)
|
110 |
+
return self._sin_cached
|
111 |
+
|
112 |
+
@property
|
113 |
+
def cos_cached(self):
|
114 |
+
logger.warning_once(
|
115 |
+
"The cos_cached attribute will be removed in 4.40. Bear in mind that its contents changed in v4.38. Use "
|
116 |
+
"the forward method of RoPE from now on instead."
|
117 |
+
)
|
118 |
+
return self._cos_cached
|
119 |
+
|
120 |
+
def forward(self, x, position_ids, seq_len=None):
|
121 |
+
if seq_len is not None:
|
122 |
+
logger.warning_once("The `seq_len` argument is deprecated and unused. It will be removed in v4.40.")
|
123 |
+
|
124 |
+
# x: [bs, num_attention_heads, seq_len, head_size]
|
125 |
+
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
|
126 |
+
position_ids_expanded = position_ids[:, None, :].float()
|
127 |
+
freqs = (inv_freq_expanded @ position_ids_expanded).transpose(1, 2)
|
128 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
129 |
+
cos = emb.cos().to(dtype=x.dtype)
|
130 |
+
sin = emb.sin().to(dtype=x.dtype)
|
131 |
+
# backwards compatibility
|
132 |
+
self._cos_cached = cos
|
133 |
+
self._sin_cached = sin
|
134 |
+
return cos, sin
|
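A shape sketch of the forward pass above (all sizes are illustrative, not taken from any shipped config): the cos/sin tables depend only on the position ids and the head dimension, so a `(bs, seq_len, head_dim)` pair comes back regardless of how many heads `x` carries.

```python
# Hypothetical sizes: head_dim=128, batch of 2, sequence length 16.
import torch

rope = LlamaRotaryEmbedding(dim=128)
x = torch.randn(2, 32, 16, 128)                        # (bs, heads, seq_len, head_dim)
position_ids = torch.arange(16).unsqueeze(0).expand(2, -1)
cos, sin = rope(x, position_ids)
print(cos.shape, sin.shape)                            # torch.Size([2, 16, 128]) each
```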
135 |
+
|
136 |
+
|
137 |
+
class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):
|
138 |
+
"""LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
|
139 |
+
|
140 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
|
141 |
+
self.scaling_factor = scaling_factor
|
142 |
+
super().__init__(dim, max_position_embeddings, base, device)
|
143 |
+
|
144 |
+
def forward(self, x, position_ids, seq_len=None):
|
145 |
+
# difference to the original RoPE: a scaling factor is applied to the position ids
|
146 |
+
position_ids = position_ids.float() / self.scaling_factor
|
147 |
+
cos, sin = super().forward(x, position_ids, seq_len)
|
148 |
+
return cos, sin
|
149 |
+
|
150 |
+
|
151 |
+
class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):
|
152 |
+
"""LlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
|
153 |
+
|
154 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
|
155 |
+
self.scaling_factor = scaling_factor
|
156 |
+
super().__init__(dim, max_position_embeddings, base, device)
|
157 |
+
|
158 |
+
def forward(self, x, position_ids, seq_len=None):
|
159 |
+
# difference to the original RoPE: inv_freq is recomputed when the sequence length > original length
|
160 |
+
seq_len = torch.max(position_ids) + 1
|
161 |
+
if seq_len > self.max_position_embeddings:
|
162 |
+
base = self.base * (
|
163 |
+
(self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
|
164 |
+
) ** (self.dim / (self.dim - 2))
|
165 |
+
inv_freq = 1.0 / (
|
166 |
+
base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device) / self.dim)
|
167 |
+
)
|
168 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: this may break with compilation
|
169 |
+
|
170 |
+
cos, sin = super().forward(x, position_ids, seq_len)
|
171 |
+
return cos, sin
|
172 |
+
|
173 |
+
|
174 |
+
def rotate_half(x):
|
175 |
+
"""Rotates half the hidden dims of the input."""
|
176 |
+
x1 = x[..., : x.shape[-1] // 2]
|
177 |
+
x2 = x[..., x.shape[-1] // 2 :]
|
178 |
+
return torch.cat((-x2, x1), dim=-1)
|
179 |
+
|
180 |
+
|
181 |
+
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
|
182 |
+
"""Applies Rotary Position Embedding to the query and key tensors.
|
183 |
+
|
184 |
+
Args:
|
185 |
+
q (`torch.Tensor`): The query tensor.
|
186 |
+
k (`torch.Tensor`): The key tensor.
|
187 |
+
cos (`torch.Tensor`): The cosine part of the rotary embedding.
|
188 |
+
sin (`torch.Tensor`): The sine part of the rotary embedding.
|
189 |
+
position_ids (`torch.Tensor`, *optional*):
|
190 |
+
Deprecated and unused.
|
191 |
+
unsqueeze_dim (`int`, *optional*, defaults to 1):
|
192 |
+
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
|
193 |
+
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
|
194 |
+
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
|
195 |
+
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
|
196 |
+
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
|
197 |
+
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
|
198 |
+
Returns:
|
199 |
+
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
|
200 |
+
"""
|
201 |
+
cos = cos.unsqueeze(unsqueeze_dim)
|
202 |
+
sin = sin.unsqueeze(unsqueeze_dim)
|
203 |
+
q_embed = (q * cos) + (rotate_half(q) * sin)
|
204 |
+
k_embed = (k * cos) + (rotate_half(k) * sin)
|
205 |
+
return q_embed, k_embed
|
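A small usage sketch for the helper above, with grouped-query shapes as an illustration only (32 query heads, 8 key/value heads): `unsqueeze_dim=1` lets the `(bs, seq_len, head_dim)` cos/sin tables broadcast over either head count, so the rotated tensors keep their input shapes.

```python
# Sketch with assumed shapes; cos/sin come from the rotary module defined above.
import torch

q = torch.randn(2, 32, 16, 128)                        # (bs, q_heads, seq, head_dim)
k = torch.randn(2, 8, 16, 128)                         # (bs, kv_heads, seq, head_dim)
position_ids = torch.arange(16).unsqueeze(0).expand(2, -1)
cos, sin = LlamaRotaryEmbedding(dim=128)(q, position_ids)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
assert q_rot.shape == q.shape and k_rot.shape == k.shape
```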
206 |
+
|
207 |
+
|
208 |
+
class LlamaMLP(nn.Module):
|
209 |
+
def __init__(self, config):
|
210 |
+
super().__init__()
|
211 |
+
self.config = config
|
212 |
+
self.hidden_size = config.hidden_size
|
213 |
+
self.intermediate_size = config.intermediate_size
|
214 |
+
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
215 |
+
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
216 |
+
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
|
217 |
+
self.act_fn = ACT2FN[config.hidden_act]
|
218 |
+
|
219 |
+
def forward(self, x):
|
220 |
+
if self.config.pretraining_tp > 1:
|
221 |
+
slice = self.intermediate_size // self.config.pretraining_tp
|
222 |
+
gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
|
223 |
+
up_proj_slices = self.up_proj.weight.split(slice, dim=0)
|
224 |
+
down_proj_slices = self.down_proj.weight.split(slice, dim=1)
|
225 |
+
|
226 |
+
gate_proj = torch.cat(
|
227 |
+
[F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1
|
228 |
+
)
|
229 |
+
up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
|
230 |
+
|
231 |
+
intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
|
232 |
+
down_proj = [
|
233 |
+
F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)
|
234 |
+
]
|
235 |
+
down_proj = sum(down_proj)
|
236 |
+
else:
|
237 |
+
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
|
238 |
+
|
239 |
+
return down_proj
|
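For the common `pretraining_tp == 1` path above, the block is a SwiGLU-style feed-forward, `down(act(gate(x)) * up(x))`. A sketch with made-up sizes (the config object here is a stand-in, not one of the shipped configs):

```python
# Sketch only: check the module against the explicit SwiGLU expression.
import torch
from types import SimpleNamespace

cfg = SimpleNamespace(hidden_size=64, intermediate_size=172, hidden_act="silu", pretraining_tp=1)
mlp = LlamaMLP(cfg)
x = torch.randn(2, 5, 64)
ref = mlp.down_proj(torch.nn.functional.silu(mlp.gate_proj(x)) * mlp.up_proj(x))
assert torch.allclose(mlp(x), ref)
```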
240 |
+
|
241 |
+
|
242 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
243 |
+
"""
|
244 |
+
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
|
245 |
+
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
|
246 |
+
"""
|
247 |
+
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
248 |
+
if n_rep == 1:
|
249 |
+
return hidden_states
|
250 |
+
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
|
251 |
+
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
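A shape sketch for `repeat_kv` (numbers are illustrative): with 8 key/value heads and `n_rep=4` the key/value states are expanded to the 32 query heads expected by the attention matmuls.

```python
import torch

kv = torch.randn(2, 8, 16, 128)          # (batch, num_key_value_heads, seq, head_dim)
out = repeat_kv(kv, n_rep=4)
print(out.shape)                          # torch.Size([2, 32, 16, 128])
```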
252 |
+
|
253 |
+
|
254 |
+
class LlamaAttention(nn.Module):
|
255 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
256 |
+
|
257 |
+
def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None, kv_cache_idx: Optional[int] = None):
|
258 |
+
super().__init__()
|
259 |
+
self.config = config
|
260 |
+
self.layer_idx = layer_idx
|
261 |
+
self.kv_cache_idx = kv_cache_idx
|
262 |
+
if layer_idx is None:
|
263 |
+
logger.warning_once(
|
264 |
+
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
|
265 |
+
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
|
266 |
+
"when creating this class."
|
267 |
+
)
|
268 |
+
|
269 |
+
self.attention_dropout = config.attention_dropout
|
270 |
+
self.hidden_size = config.hidden_size
|
271 |
+
self.num_heads = config.num_attention_heads
|
272 |
+
self.head_dim = self.hidden_size // self.num_heads
|
273 |
+
self.num_key_value_heads = config.num_key_value_heads
|
274 |
+
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
|
275 |
+
self.max_position_embeddings = config.max_position_embeddings
|
276 |
+
self.rope_theta = config.rope_theta
|
277 |
+
self.is_causal = True
|
278 |
+
|
279 |
+
if (self.head_dim * self.num_heads) != self.hidden_size:
|
280 |
+
raise ValueError(
|
281 |
+
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
|
282 |
+
f" and `num_heads`: {self.num_heads})."
|
283 |
+
)
|
284 |
+
|
285 |
+
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
|
286 |
+
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
|
287 |
+
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
|
288 |
+
self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)
|
289 |
+
self._init_rope()
|
290 |
+
|
291 |
+
def _init_rope(self):
|
292 |
+
if self.config.rope_scaling is None:
|
293 |
+
self.rotary_emb = LlamaRotaryEmbedding(
|
294 |
+
self.head_dim,
|
295 |
+
max_position_embeddings=self.max_position_embeddings,
|
296 |
+
base=self.rope_theta,
|
297 |
+
)
|
298 |
+
else:
|
299 |
+
scaling_type = self.config.rope_scaling["type"]
|
300 |
+
scaling_factor = self.config.rope_scaling["factor"]
|
301 |
+
if scaling_type == "linear":
|
302 |
+
self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
|
303 |
+
self.head_dim,
|
304 |
+
max_position_embeddings=self.max_position_embeddings,
|
305 |
+
scaling_factor=scaling_factor,
|
306 |
+
base=self.rope_theta,
|
307 |
+
)
|
308 |
+
elif scaling_type == "dynamic":
|
309 |
+
self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
|
310 |
+
self.head_dim,
|
311 |
+
max_position_embeddings=self.max_position_embeddings,
|
312 |
+
scaling_factor=scaling_factor,
|
313 |
+
base=self.rope_theta,
|
314 |
+
)
|
315 |
+
else:
|
316 |
+
raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
|
317 |
+
|
318 |
+
def forward(
|
319 |
+
self,
|
320 |
+
hidden_states: torch.Tensor,
|
321 |
+
attention_mask: Optional[torch.Tensor] = None,
|
322 |
+
position_ids: Optional[torch.LongTensor] = None,
|
323 |
+
past_key_value: Optional[Cache] = None,
|
324 |
+
output_attentions: bool = False,
|
325 |
+
use_cache: bool = False,
|
326 |
+
cache_position: Optional[torch.LongTensor] = None,
|
327 |
+
**kwargs,
|
328 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
329 |
+
bsz, q_len, _ = hidden_states.size()
|
330 |
+
|
331 |
+
if self.config.pretraining_tp > 1:
|
332 |
+
key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
|
333 |
+
query_slices = self.q_proj.weight.split(
|
334 |
+
(self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
|
335 |
+
)
|
336 |
+
key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
|
337 |
+
value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
|
338 |
+
|
339 |
+
query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
|
340 |
+
query_states = torch.cat(query_states, dim=-1)
|
341 |
+
|
342 |
+
key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
|
343 |
+
key_states = torch.cat(key_states, dim=-1)
|
344 |
+
|
345 |
+
value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
|
346 |
+
value_states = torch.cat(value_states, dim=-1)
|
347 |
+
|
348 |
+
else:
|
349 |
+
query_states = self.q_proj(hidden_states)
|
350 |
+
key_states = self.k_proj(hidden_states)
|
351 |
+
value_states = self.v_proj(hidden_states)
|
352 |
+
|
353 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
354 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
355 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
356 |
+
|
357 |
+
past_key_value = getattr(self, "past_key_value", past_key_value)
|
358 |
+
cos, sin = self.rotary_emb(value_states, position_ids)
|
359 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
360 |
+
|
361 |
+
if past_key_value is not None:
|
362 |
+
# sin and cos are specific to RoPE models; position_ids needed for the static cache
|
363 |
+
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
|
364 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.kv_cache_idx, cache_kwargs)
|
365 |
+
|
366 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
367 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
368 |
+
|
369 |
+
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
370 |
+
|
371 |
+
if attention_mask is not None: # no matter the length, we just slice it
|
372 |
+
if cache_position is not None:
|
373 |
+
causal_mask = attention_mask[:, :, cache_position, : key_states.shape[-2]]
|
374 |
+
attn_weights = attn_weights + causal_mask
|
375 |
+
|
376 |
+
# upcast attention to fp32
|
377 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
378 |
+
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
|
379 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
380 |
+
|
381 |
+
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
|
382 |
+
raise ValueError(
|
383 |
+
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
|
384 |
+
f" {attn_output.size()}"
|
385 |
+
)
|
386 |
+
|
387 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
388 |
+
|
389 |
+
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
|
390 |
+
|
391 |
+
if self.config.pretraining_tp > 1:
|
392 |
+
attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
|
393 |
+
o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
|
394 |
+
attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
|
395 |
+
else:
|
396 |
+
attn_output = self.o_proj(attn_output)
|
397 |
+
|
398 |
+
if not output_attentions:
|
399 |
+
attn_weights = None
|
400 |
+
|
401 |
+
return attn_output, attn_weights, past_key_value
|
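The core of the eager path above is standard scaled dot-product attention, softmax(QKᵀ/√d)V, upcast to float32 before the softmax. A minimal numeric sketch, detached from the module and its projections (shapes are assumptions):

```python
import math
import torch

q = torch.randn(1, 2, 5, 8)               # (batch, heads, seq, head_dim)
k = torch.randn(1, 2, 5, 8)
v = torch.randn(1, 2, 5, 8)
weights = torch.softmax((q @ k.transpose(2, 3)) / math.sqrt(8), dim=-1)
out = weights @ v                          # (1, 2, 5, 8); the module then transposes,
                                           # reshapes and applies o_proj
```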
402 |
+
|
403 |
+
|
404 |
+
class LlamaSdpaAttention(LlamaAttention):
|
405 |
+
"""
|
406 |
+
Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
|
407 |
+
`LlamaAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to the
|
408 |
+
SDPA API.
|
409 |
+
"""
|
410 |
+
|
411 |
+
# Adapted from LlamaAttention.forward
|
412 |
+
def forward(
|
413 |
+
self,
|
414 |
+
hidden_states: torch.Tensor,
|
415 |
+
attention_mask: Optional[torch.Tensor] = None,
|
416 |
+
position_ids: Optional[torch.LongTensor] = None,
|
417 |
+
past_key_value: Optional[Cache] = None,
|
418 |
+
output_attentions: bool = False,
|
419 |
+
use_cache: bool = False,
|
420 |
+
cache_position: Optional[torch.LongTensor] = None,
|
421 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
422 |
+
if output_attentions:
|
423 |
+
# TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
|
424 |
+
logger.warning_once(
|
425 |
+
"LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
|
426 |
+
'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
|
427 |
+
)
|
428 |
+
return super().forward(
|
429 |
+
hidden_states=hidden_states,
|
430 |
+
attention_mask=attention_mask,
|
431 |
+
position_ids=position_ids,
|
432 |
+
past_key_value=past_key_value,
|
433 |
+
output_attentions=output_attentions,
|
434 |
+
use_cache=use_cache,
|
435 |
+
cache_position=cache_position,
|
436 |
+
)
|
437 |
+
|
438 |
+
bsz, q_len, _ = hidden_states.size()
|
439 |
+
|
440 |
+
query_states = self.q_proj(hidden_states)
|
441 |
+
key_states = self.k_proj(hidden_states)
|
442 |
+
value_states = self.v_proj(hidden_states)
|
443 |
+
|
444 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
445 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
446 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
447 |
+
|
448 |
+
cos, sin = self.rotary_emb(value_states, position_ids)
|
449 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
450 |
+
|
451 |
+
past_key_value = getattr(self, "past_key_value", past_key_value)
|
452 |
+
|
453 |
+
if past_key_value is not None:
|
454 |
+
# sin and cos are specific to RoPE models; position_ids needed for the static cache
|
455 |
+
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
|
456 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.kv_cache_idx, cache_kwargs)
|
457 |
+
|
458 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
459 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
460 |
+
|
461 |
+
causal_mask = attention_mask
|
462 |
+
if attention_mask is not None and cache_position is not None:
|
463 |
+
causal_mask = causal_mask[:, :, cache_position, : key_states.shape[-2]]
|
464 |
+
|
465 |
+
# SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
|
466 |
+
# Reference: https://github.com/pytorch/pytorch/issues/112577.
|
467 |
+
if query_states.device.type == "cuda" and causal_mask is not None:
|
468 |
+
query_states = query_states.contiguous()
|
469 |
+
key_states = key_states.contiguous()
|
470 |
+
value_states = value_states.contiguous()
|
471 |
+
|
472 |
+
attn_output = torch.nn.functional.scaled_dot_product_attention(
|
473 |
+
query_states,
|
474 |
+
key_states,
|
475 |
+
value_states,
|
476 |
+
attn_mask=causal_mask,
|
477 |
+
dropout_p=self.attention_dropout if self.training else 0.0,
|
478 |
+
)
|
479 |
+
|
480 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
481 |
+
attn_output = attn_output.view(bsz, q_len, self.hidden_size)
|
482 |
+
|
483 |
+
attn_output = self.o_proj(attn_output)
|
484 |
+
|
485 |
+
return attn_output, None, past_key_value
|
486 |
+
|
487 |
+
|
488 |
+
LLAMA_ATTENTION_CLASSES = {
|
489 |
+
"eager": LlamaAttention,
|
490 |
+
"sdpa": LlamaSdpaAttention,
|
491 |
+
}
|
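The registry above is looked up with `config._attn_implementation` when the decoder layer below builds its attention module; since the flash-attention imports are commented out, only the two entries here resolve. A trivial sketch of the lookup:

```python
# Sketch: pick the attention class the decoder layer would instantiate.
attn_cls = LLAMA_ATTENTION_CLASSES["sdpa"]
print(attn_cls.__name__)                   # LlamaSdpaAttention
```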
492 |
+
|
493 |
+
|
494 |
+
class LlamaDecoderLayer(nn.Module):
|
495 |
+
def __init__(self, config: LlamaConfig, layer_idx: int):
|
496 |
+
super().__init__()
|
497 |
+
self.hidden_size = config.hidden_size
|
498 |
+
self.layer_idx = layer_idx
|
499 |
+
|
500 |
+
self.kv_cache_idx = 0
|
501 |
+
for i in range(self.layer_idx):
|
502 |
+
if not config.drop_attn_list[i]:
|
503 |
+
self.kv_cache_idx += 1
|
504 |
+
|
505 |
+
self.drop_attn = config.drop_attn_list[layer_idx]
|
506 |
+
if self.drop_attn:
|
507 |
+
self.self_attn = None
|
508 |
+
self.input_layernorm = None
|
509 |
+
else:
|
510 |
+
self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx, kv_cache_idx=self.kv_cache_idx)
|
511 |
+
self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
512 |
+
self.drop_mlp = config.drop_mlp_list[layer_idx]
|
513 |
+
if self.drop_mlp:
|
514 |
+
self.mlp = None
|
515 |
+
self.post_attention_layernorm = None
|
516 |
+
else:
|
517 |
+
self.mlp = LlamaMLP(config)
|
518 |
+
self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
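The constructor above is where this repository departs from stock LLaMA: per-layer flags from `config.drop_attn_list` / `config.drop_mlp_list` (defined in the accompanying `configuration_dropped_llama.py`) decide whether the attention or MLP sub-block exists at all, and `kv_cache_idx` packs the surviving attention modules into consecutive KV-cache slots. A standalone sketch with made-up flags (not one of the shipped Drop configurations):

```python
# Assumed example: attention dropped in layers 1 and 3 of a 5-layer model.
drop_attn_list = [False, True, False, True, False]
kv_cache_idx = {}
slot = 0
for layer_idx, dropped in enumerate(drop_attn_list):
    if not dropped:
        kv_cache_idx[layer_idx] = slot     # same packing as the loop in __init__
        slot += 1
print(kv_cache_idx)                        # {0: 0, 2: 1, 4: 2}; layers 1 and 3 keep no cache
```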
519 |
+
|
520 |
+
|
521 |
+
def forward(
|
522 |
+
self,
|
523 |
+
hidden_states: torch.Tensor,
|
524 |
+
attention_mask: Optional[torch.Tensor] = None,
|
525 |
+
position_ids: Optional[torch.LongTensor] = None,
|
526 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
527 |
+
output_attentions: Optional[bool] = False,
|
528 |
+
use_cache: Optional[bool] = False,
|
529 |
+
cache_position: Optional[torch.LongTensor] = None,
|
530 |
+
**kwargs,
|
531 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
532 |
+
"""
|
533 |
+
Args:
|
534 |
+
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
535 |
+
attention_mask (`torch.FloatTensor`, *optional*):
|
536 |
+
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
|
537 |
+
query_sequence_length, key_sequence_length)` if default attention is used.
|
538 |
+
output_attentions (`bool`, *optional*):
|
539 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
540 |
+
returned tensors for more detail.
|
541 |
+
use_cache (`bool`, *optional*):
|
542 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
543 |
+
(see `past_key_values`).
|
544 |
+
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
545 |
+
"""
|
546 |
+
if "padding_mask" in kwargs:
|
547 |
+
warnings.warn(
|
548 |
+
"Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
|
549 |
+
)
|
550 |
+
|
551 |
+
if not self.drop_attn:
|
552 |
+
residual = hidden_states
|
553 |
+
|
554 |
+
hidden_states = self.input_layernorm(hidden_states)
|
555 |
+
|
556 |
+
# Self Attention
|
557 |
+
hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
558 |
+
hidden_states=hidden_states,
|
559 |
+
attention_mask=attention_mask,
|
560 |
+
position_ids=position_ids,
|
561 |
+
past_key_value=past_key_value,
|
562 |
+
output_attentions=output_attentions,
|
563 |
+
use_cache=use_cache,
|
564 |
+
cache_position=cache_position,
|
565 |
+
**kwargs,
|
566 |
+
)
|
567 |
+
hidden_states = residual + hidden_states
|
568 |
+
|
569 |
+
if not self.drop_mlp:
|
570 |
+
# Fully Connected
|
571 |
+
residual = hidden_states
|
572 |
+
hidden_states = self.post_attention_layernorm(hidden_states)
|
573 |
+
hidden_states = self.mlp(hidden_states)
|
574 |
+
hidden_states = residual + hidden_states
|
575 |
+
|
576 |
+
outputs = (hidden_states,)
|
577 |
+
|
578 |
+
if output_attentions:
|
579 |
+
outputs += (self_attn_weights,)
|
580 |
+
if use_cache and not self.drop_attn:
|
581 |
+
outputs += (present_key_value,)
|
582 |
+
# print(outputs)
|
583 |
+
return outputs
|
584 |
+
|
585 |
+
|
586 |
+
LLAMA_START_DOCSTRING = r"""
|
587 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
588 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
589 |
+
etc.)
|
590 |
+
|
591 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
592 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
593 |
+
and behavior.
|
594 |
+
|
595 |
+
Parameters:
|
596 |
+
config ([`LlamaConfig`]):
|
597 |
+
Model configuration class with all the parameters of the model. Initializing with a config file does not
|
598 |
+
load the weights associated with the model, only the configuration. Check out the
|
599 |
+
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
600 |
+
"""
|
601 |
+
|
602 |
+
|
603 |
+
@add_start_docstrings(
|
604 |
+
"The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
|
605 |
+
LLAMA_START_DOCSTRING,
|
606 |
+
)
|
607 |
+
class LlamaPreTrainedModel(PreTrainedModel):
|
608 |
+
config_class = LlamaConfig
|
609 |
+
base_model_prefix = "model"
|
610 |
+
supports_gradient_checkpointing = True
|
611 |
+
_no_split_modules = ["LlamaDecoderLayer"]
|
612 |
+
_skip_keys_device_placement = ["past_key_values", "causal_mask"]
|
613 |
+
_supports_flash_attn_2 = True
|
614 |
+
_supports_sdpa = True
|
615 |
+
_supports_cache_class = True
|
616 |
+
|
617 |
+
def _init_weights(self, module):
|
618 |
+
std = self.config.initializer_range
|
619 |
+
if isinstance(module, nn.Linear):
|
620 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
621 |
+
if module.bias is not None:
|
622 |
+
module.bias.data.zero_()
|
623 |
+
elif isinstance(module, nn.Embedding):
|
624 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
625 |
+
if module.padding_idx is not None:
|
626 |
+
module.weight.data[module.padding_idx].zero_()
|
627 |
+
|
628 |
+
def _setup_cache(self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None):
|
629 |
+
if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache:
|
630 |
+
raise ValueError(
|
631 |
+
"`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
|
632 |
+
"make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
|
633 |
+
)
|
634 |
+
|
635 |
+
if max_cache_len > self.model.causal_mask.shape[-1] or self.device != self.model.causal_mask.device:
|
636 |
+
causal_mask = torch.full((max_cache_len, max_cache_len), fill_value=1, device=self.device)
|
637 |
+
self.register_buffer("causal_mask", torch.triu(causal_mask, diagonal=1), persistent=False)
|
638 |
+
|
639 |
+
for layer in self.model.layers:
|
640 |
+
weights = layer.self_attn.o_proj.weight
|
641 |
+
layer.self_attn.past_key_value = cache_cls(
|
642 |
+
self.config, max_batch_size, max_cache_len, device=weights.device, dtype=weights.dtype
|
643 |
+
)
|
644 |
+
|
645 |
+
def _reset_cache(self):
|
646 |
+
for layer in self.model.layers:
|
647 |
+
layer.self_attn.past_key_value = None
|
648 |
+
|
649 |
+
|
650 |
+
LLAMA_INPUTS_DOCSTRING = r"""
|
651 |
+
Args:
|
652 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
653 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
654 |
+
it.
|
655 |
+
|
656 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
657 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
658 |
+
|
659 |
+
[What are input IDs?](../glossary#input-ids)
|
660 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
661 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
662 |
+
|
663 |
+
- 1 for tokens that are **not masked**,
|
664 |
+
- 0 for tokens that are **masked**.
|
665 |
+
|
666 |
+
[What are attention masks?](../glossary#attention-mask)
|
667 |
+
|
668 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
669 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
670 |
+
|
671 |
+
If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
|
672 |
+
`past_key_values`).
|
673 |
+
|
674 |
+
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
|
675 |
+
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
|
676 |
+
information on the default strategy.
|
677 |
+
|
678 |
+
- 1 indicates the head is **not masked**,
|
679 |
+
- 0 indicates the head is **masked**.
|
680 |
+
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
681 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
682 |
+
config.n_positions - 1]`.
|
683 |
+
|
684 |
+
[What are position IDs?](../glossary#position-ids)
|
685 |
+
past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
|
686 |
+
Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
687 |
+
blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
|
688 |
+
returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
|
689 |
+
|
690 |
+
Two formats are allowed:
|
691 |
+
- a [`~cache_utils.Cache`] instance;
|
692 |
+
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
|
693 |
+
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
|
694 |
+
cache format.
|
695 |
+
|
696 |
+
The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
|
697 |
+
legacy cache format will be returned.
|
698 |
+
|
699 |
+
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
|
700 |
+
have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
|
701 |
+
of shape `(batch_size, sequence_length)`.
|
702 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
703 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
704 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
705 |
+
model's internal embedding lookup matrix.
|
706 |
+
use_cache (`bool`, *optional*):
|
707 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
708 |
+
`past_key_values`).
|
709 |
+
output_attentions (`bool`, *optional*):
|
710 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
711 |
+
tensors for more detail.
|
712 |
+
output_hidden_states (`bool`, *optional*):
|
713 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
714 |
+
more detail.
|
715 |
+
return_dict (`bool`, *optional*):
|
716 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
717 |
+
"""
|
718 |
+
|
719 |
+
|
720 |
+
@add_start_docstrings(
|
721 |
+
"The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
|
722 |
+
LLAMA_START_DOCSTRING,
|
723 |
+
)
|
724 |
+
class LlamaModel(LlamaPreTrainedModel):
|
725 |
+
"""
|
726 |
+
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
|
727 |
+
|
728 |
+
Args:
|
729 |
+
config: LlamaConfig
|
730 |
+
"""
|
731 |
+
|
732 |
+
def __init__(self, config: LlamaConfig):
|
733 |
+
super().__init__(config)
|
734 |
+
self.padding_idx = config.pad_token_id
|
735 |
+
self.vocab_size = config.vocab_size
|
736 |
+
|
737 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
738 |
+
self.layers = nn.ModuleList(
|
739 |
+
[LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
740 |
+
)
|
741 |
+
self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
742 |
+
self.gradient_checkpointing = False
|
743 |
+
|
744 |
+
# register a causal mask to separate causal and padding mask creation. Merging happens in the attention class
|
745 |
+
causal_mask = torch.full((config.max_position_embeddings, config.max_position_embeddings), fill_value=1)
|
746 |
+
self.register_buffer("causal_mask", torch.triu(causal_mask, diagonal=1), persistent=False)
|
747 |
+
# Initialize weights and apply final processing
|
748 |
+
self.post_init()
|
749 |
+
|
750 |
+
def get_input_embeddings(self):
|
751 |
+
return self.embed_tokens
|
752 |
+
|
753 |
+
def set_input_embeddings(self, value):
|
754 |
+
self.embed_tokens = value
|
755 |
+
|
756 |
+
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
|
757 |
+
def forward(
|
758 |
+
self,
|
759 |
+
input_ids: torch.LongTensor = None,
|
760 |
+
attention_mask: Optional[torch.Tensor] = None,
|
761 |
+
position_ids: Optional[torch.LongTensor] = None,
|
762 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
763 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
764 |
+
use_cache: Optional[bool] = None,
|
765 |
+
output_attentions: Optional[bool] = None,
|
766 |
+
output_hidden_states: Optional[bool] = None,
|
767 |
+
return_dict: Optional[bool] = None,
|
768 |
+
cache_position: Optional[torch.LongTensor] = None,
|
769 |
+
) -> Union[Tuple, BaseModelOutputWithPast]:
|
770 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
771 |
+
output_hidden_states = (
|
772 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
773 |
+
)
|
774 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
775 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
776 |
+
# use_cache = False
|
777 |
+
if (input_ids is None) ^ (inputs_embeds is not None):
|
778 |
+
raise ValueError(
|
779 |
+
"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
|
780 |
+
)
|
781 |
+
|
782 |
+
if self.gradient_checkpointing and self.training and use_cache:
|
783 |
+
logger.warning_once(
|
784 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
|
785 |
+
)
|
786 |
+
use_cache = False
|
787 |
+
|
788 |
+
if inputs_embeds is None:
|
789 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
790 |
+
|
791 |
+
past_seen_tokens = 0
|
792 |
+
if use_cache: # kept for BC (cache positions)
|
793 |
+
if not isinstance(past_key_values, StaticCache):
|
794 |
+
past_key_values = DynamicCache.from_legacy_cache(past_key_values)
|
795 |
+
past_seen_tokens = past_key_values.get_seq_length()
|
796 |
+
|
797 |
+
if cache_position is None:
|
798 |
+
cache_position = torch.arange(
|
799 |
+
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
|
800 |
+
)
|
801 |
+
|
802 |
+
if position_ids is None:
|
803 |
+
position_ids = cache_position.unsqueeze(0)
|
804 |
+
|
805 |
+
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)
|
806 |
+
|
807 |
+
# embed positions
|
808 |
+
hidden_states = inputs_embeds
|
809 |
+
|
810 |
+
# decoder layers
|
811 |
+
all_hidden_states = () if output_hidden_states else None
|
812 |
+
all_self_attns = () if output_attentions else None
|
813 |
+
next_decoder_cache = None
|
814 |
+
|
815 |
+
for decoder_layer in self.layers:
|
816 |
+
if output_hidden_states:
|
817 |
+
all_hidden_states += (hidden_states,)
|
818 |
+
|
819 |
+
if self.gradient_checkpointing and self.training:
|
820 |
+
layer_outputs = self._gradient_checkpointing_func(
|
821 |
+
decoder_layer.__call__,
|
822 |
+
hidden_states,
|
823 |
+
causal_mask,
|
824 |
+
position_ids,
|
825 |
+
past_key_values,
|
826 |
+
output_attentions,
|
827 |
+
use_cache,
|
828 |
+
cache_position,
|
829 |
+
)
|
830 |
+
else:
|
831 |
+
layer_outputs = decoder_layer(
|
832 |
+
hidden_states,
|
833 |
+
attention_mask=causal_mask,
|
834 |
+
position_ids=position_ids,
|
835 |
+
past_key_value=past_key_values,
|
836 |
+
output_attentions=output_attentions,
|
837 |
+
use_cache=use_cache,
|
838 |
+
cache_position=cache_position,
|
839 |
+
)
|
840 |
+
|
841 |
+
hidden_states = layer_outputs[0]
|
842 |
+
|
843 |
+
if use_cache and not decoder_layer.drop_attn:
|
844 |
+
next_decoder_cache = layer_outputs[2 if output_attentions else 1]
|
845 |
+
|
846 |
+
if output_attentions and not decoder_layer.drop_attn:
|
847 |
+
all_self_attns += (layer_outputs[1],)
|
848 |
+
|
849 |
+
hidden_states = self.norm(hidden_states)
|
850 |
+
|
851 |
+
# add hidden states from the last decoder layer
|
852 |
+
if output_hidden_states:
|
853 |
+
all_hidden_states += (hidden_states,)
|
854 |
+
|
855 |
+
next_cache = None
|
856 |
+
if use_cache:
|
857 |
+
next_cache = (
|
858 |
+
next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
|
859 |
+
)
|
860 |
+
# print(next_cache)
|
861 |
+
if not return_dict:
|
862 |
+
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
|
863 |
+
return BaseModelOutputWithPast(
|
864 |
+
last_hidden_state=hidden_states,
|
865 |
+
past_key_values=next_cache,
|
866 |
+
hidden_states=all_hidden_states,
|
867 |
+
attentions=all_self_attns,
|
868 |
+
)
|
869 |
+
|
870 |
+
def _update_causal_mask(self, attention_mask, input_tensor):
|
871 |
+
if self.config._attn_implementation == "flash_attention_2":
|
872 |
+
if attention_mask is not None and 0.0 in attention_mask:
|
873 |
+
return attention_mask
|
874 |
+
return None
|
875 |
+
|
876 |
+
batch_size, seq_length = input_tensor.shape[:2]
|
877 |
+
dtype = input_tensor.dtype
|
878 |
+
device = input_tensor.device
|
879 |
+
|
880 |
+
# support going beyond the cached `max_position_embeddings`
|
881 |
+
if seq_length > self.causal_mask.shape[-1]:
|
882 |
+
causal_mask = torch.full((2 * self.causal_mask.shape[-1], 2 * self.causal_mask.shape[-1]), fill_value=1)
|
883 |
+
self.register_buffer("causal_mask", torch.triu(causal_mask, diagonal=1), persistent=False)
|
884 |
+
|
885 |
+
if hasattr(self, "causal_mask"): # we use the current dtype to avoid any overflows
|
886 |
+
causal_mask = (
|
887 |
+
self.causal_mask[None, None, :, :].repeat(batch_size, 1, 1, 1).to(dtype) * torch.finfo(dtype).min
|
888 |
+
)
|
889 |
+
else:
|
890 |
+
mask = torch.full(
|
891 |
+
(self.config.max_position_embeddings, self.config.max_position_embeddings),
|
892 |
+
fill_value=torch.finfo(dtype).min,
|
893 |
+
)
|
894 |
+
causal_mask = torch.triu(mask, diagonal=1)
|
895 |
+
|
896 |
+
causal_mask = causal_mask.to(dtype=dtype, device=device)
|
897 |
+
if attention_mask is not None and attention_mask.dim() == 2:
|
898 |
+
mask_length = attention_mask.shape[-1]
|
899 |
+
padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
|
900 |
+
causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(
|
901 |
+
padding_mask, torch.finfo(dtype).min
|
902 |
+
)
|
903 |
+
|
904 |
+
if self.config._attn_implementation == "sdpa":
|
905 |
+
is_tracing = torch.jit.is_tracing() or isinstance(input_tensor, torch.fx.Proxy)
|
906 |
+
if not is_tracing and attention_mask is not None and torch.any(attention_mask != 1):
|
907 |
+
causal_mask = causal_mask.mul(~torch.all(causal_mask == causal_mask.min(), dim=-1)[..., None]).to(
|
908 |
+
dtype
|
909 |
+
)
|
910 |
+
|
911 |
+
return causal_mask
|
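A sketch of the additive mask the method above builds before any padding is merged in (dtype and length are illustrative): upper-triangular positions receive the most negative representable value so the subsequent softmax assigns them zero probability.

```python
import torch

seq_len, dtype = 4, torch.float32
ones_above_diag = torch.triu(torch.full((seq_len, seq_len), 1), diagonal=1)
additive = ones_above_diag[None, None].to(dtype) * torch.finfo(dtype).min
print(additive[0, 0])                      # 0 on/below the diagonal, ~-3.4e38 above it
```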
912 |
+
|
913 |
+
|
914 |
+
class LlamaForCausalLM(LlamaPreTrainedModel):
|
915 |
+
_tied_weights_keys = ["lm_head.weight"]
|
916 |
+
|
917 |
+
def __init__(self, config):
|
918 |
+
super().__init__(config)
|
919 |
+
self.model = LlamaModel(config)
|
920 |
+
self.vocab_size = config.vocab_size
|
921 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
922 |
+
|
923 |
+
# Initialize weights and apply final processing
|
924 |
+
self.post_init()
|
925 |
+
|
926 |
+
def get_input_embeddings(self):
|
927 |
+
return self.model.embed_tokens
|
928 |
+
|
929 |
+
def set_input_embeddings(self, value):
|
930 |
+
self.model.embed_tokens = value
|
931 |
+
|
932 |
+
def get_output_embeddings(self):
|
933 |
+
return self.lm_head
|
934 |
+
|
935 |
+
def set_output_embeddings(self, new_embeddings):
|
936 |
+
self.lm_head = new_embeddings
|
937 |
+
|
938 |
+
def set_decoder(self, decoder):
|
939 |
+
self.model = decoder
|
940 |
+
|
941 |
+
def get_decoder(self):
|
942 |
+
return self.model
|
943 |
+
|
944 |
+
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
|
945 |
+
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
946 |
+
def forward(
|
947 |
+
self,
|
948 |
+
input_ids: torch.LongTensor = None,
|
949 |
+
attention_mask: Optional[torch.Tensor] = None,
|
950 |
+
position_ids: Optional[torch.LongTensor] = None,
|
951 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
952 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
953 |
+
labels: Optional[torch.LongTensor] = None,
|
954 |
+
use_cache: Optional[bool] = None,
|
955 |
+
output_attentions: Optional[bool] = None,
|
956 |
+
output_hidden_states: Optional[bool] = None,
|
957 |
+
return_dict: Optional[bool] = None,
|
958 |
+
cache_position: Optional[torch.LongTensor] = None,
|
959 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
960 |
+
r"""
|
961 |
+
Args:
|
962 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
963 |
+
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
964 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
965 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
966 |
+
|
967 |
+
Returns:
|
968 |
+
|
969 |
+
Example:
|
970 |
+
|
971 |
+
```python
|
972 |
+
>>> from transformers import AutoTokenizer, LlamaForCausalLM
|
973 |
+
|
974 |
+
>>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
|
975 |
+
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
|
976 |
+
|
977 |
+
>>> prompt = "Hey, are you conscious? Can you talk to me?"
|
978 |
+
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
979 |
+
|
980 |
+
>>> # Generate
|
981 |
+
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
982 |
+
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
983 |
+
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
984 |
+
```"""
|
985 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
986 |
+
output_hidden_states = (
|
987 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
988 |
+
)
|
989 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
990 |
+
|
991 |
+
# decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
|
992 |
+
outputs = self.model(
|
993 |
+
input_ids=input_ids,
|
994 |
+
attention_mask=attention_mask,
|
995 |
+
position_ids=position_ids,
|
996 |
+
past_key_values=past_key_values,
|
997 |
+
inputs_embeds=inputs_embeds,
|
998 |
+
use_cache=use_cache,
|
999 |
+
output_attentions=output_attentions,
|
1000 |
+
output_hidden_states=output_hidden_states,
|
1001 |
+
return_dict=return_dict,
|
1002 |
+
cache_position=cache_position,
|
1003 |
+
)
|
1004 |
+
|
1005 |
+
hidden_states = outputs[0]
|
1006 |
+
if self.config.pretraining_tp > 1:
|
1007 |
+
lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
|
1008 |
+
logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
|
1009 |
+
logits = torch.cat(logits, dim=-1)
|
1010 |
+
else:
|
1011 |
+
logits = self.lm_head(hidden_states)
|
1012 |
+
logits = logits.float()
|
1013 |
+
|
1014 |
+
loss = None
|
1015 |
+
if labels is not None:
|
1016 |
+
# Shift so that tokens < n predict n
|
1017 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
1018 |
+
shift_labels = labels[..., 1:].contiguous()
|
1019 |
+
# Flatten the tokens
|
1020 |
+
loss_fct = CrossEntropyLoss()
|
1021 |
+
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
1022 |
+
shift_labels = shift_labels.view(-1)
|
1023 |
+
# Enable model parallelism
|
1024 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
1025 |
+
loss = loss_fct(shift_logits, shift_labels)
|
1026 |
+
|
1027 |
+
if not return_dict:
|
1028 |
+
output = (logits,) + outputs[1:]
|
1029 |
+
return (loss,) + output if loss is not None else output
|
1030 |
+
|
1031 |
+
return CausalLMOutputWithPast(
|
1032 |
+
loss=loss,
|
1033 |
+
logits=logits,
|
1034 |
+
past_key_values=outputs.past_key_values,
|
1035 |
+
hidden_states=outputs.hidden_states,
|
1036 |
+
attentions=outputs.attentions,
|
1037 |
+
)
|
1038 |
+
|
1039 |
+
    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        past_length = 0
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                past_length = past_key_values.seen_tokens
                max_cache_length = past_key_values.get_max_length()
            else:
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            #     some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
            #     input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            #     input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        if past_key_value := getattr(self.model.layers[0].self_attn, "past_key_value", None):
            # generation with static cache
            past_length = past_key_value.get_seq_length()
            input_ids = input_ids[:, past_length:]
            position_ids = position_ids[:, past_length:]

        # TODO @gante we should only keep a `cache_position` in generate, and do +=1.
        # same goes for position ids. Could also help with continued generation.
        cache_position = kwargs.get("cache_position", None)
        if cache_position is None:
            cache_position = torch.arange(
                past_length, past_length + position_ids.shape[-1], device=position_ids.device
            )

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past

@add_start_docstrings(
    """
    The LLaMa Model transformer with a sequence classification head on top (linear layer).

    [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    LLAMA_START_DOCSTRING,
)
class LlamaForSequenceClassification(LlamaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = LlamaModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@add_start_docstrings(
    """
    The Llama Model transformer with a span classification head on top for extractive question-answering tasks like
    SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    LLAMA_START_DOCSTRING,
)
class LlamaForQuestionAnswering(LlamaPreTrainedModel):
    # Copied from transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering.__init__ with Bloom->Llama
    def __init__(self, config):
        super().__init__(config)
        self.transformer = LlamaModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.transformer.embed_tokens

    def set_input_embeddings(self, value):
        self.transformer.embed_tokens = value

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1).to(start_logits.device)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1).to(end_logits.device)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
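Since the custom modeling code above keeps the upstream transformers Llama interfaces, a minimal usage sketch could look like the following. This is an illustration only: the local path and prompt are placeholders, and it assumes the repository's config.json auto-maps to the custom Dropped-Llama classes so that trust_remote_code=True picks them up.

# Hypothetical usage sketch (path and prompt are placeholders, not part of this commit)
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "./Llama2-70b-Dropped"  # placeholder: local clone of this repository
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,  # so the modeling_dropped_llama.py code in this repo is used
)

inputs = tokenizer("The capital of France is", return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))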
special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
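These entries are what the loaded tokenizer reports as its special tokens. A quick check, as a sketch (the path is a placeholder):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./Llama2-70b-Dropped")  # placeholder path
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)  # expected: <s> </s> <unk>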
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
tokenizer_config.json
ADDED
@@ -0,0 +1,35 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "bos_token": {
    "__type": "AddedToken",
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "clean_up_tokenization_spaces": false,
  "eos_token": {
    "__type": "AddedToken",
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "legacy": false,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": null,
  "padding_side": "right",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": {
    "__type": "AddedToken",
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
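With "add_bos_token": true and "add_eos_token": false, encoding prepends <s> but does not append </s>. A small sketch of the expected behavior (placeholder path; the assertions assume the standard Llama-2 SentencePiece vocabulary shipped in tokenizer.model):

from transformers import LlamaTokenizer

tokenizer = LlamaTokenizer.from_pretrained("./Llama2-70b-Dropped")  # placeholder path
ids = tokenizer("Hello world").input_ids
assert ids[0] == tokenizer.bos_token_id   # <s> is prepended
assert ids[-1] != tokenizer.eos_token_id  # </s> is not appended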