TKDKid1000 committed
Commit e78c3e0 · Parent(s): 085b8a6

Update to new phi model and config.

Changed files:
- config.json (+10 −6)
- configuration_mixformer_sequential.py → configuration_phi.py (+25 −22, renamed)
- modeling_mixformer_sequential.py (+0 −778, deleted)
- modeling_phi.py (+961 −0, added)
config.json
CHANGED
@@ -9,19 +9,23 @@
     }
   },
   "architectures": [
-    "MixFormerSequentialForCausalLM"
+    "PhiForCausalLM"
   ],
   "auto_map": {
-    "AutoConfig": "microsoft/phi-1_5--configuration_mixformer_sequential.MixFormerSequentialConfig",
-    "AutoModelForCausalLM": "microsoft/phi-1_5--modeling_mixformer_sequential.MixFormerSequentialForCausalLM"
+    "AutoConfig": "microsoft/phi-1_5--configuration_phi.PhiConfig",
+    "AutoModelForCausalLM": "microsoft/phi-1_5--modeling_phi.PhiForCausalLM"
   },
   "embd_layer": "default",
   "embd_pdrop": 0.0,
+  "flash_attn": false,
+  "flash_rotary": false,
+  "fused_dense": false,
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
-  "model_type": "mixformer-sequential",
+  "model_type": "phi",
   "n_embd": 2048,
   "n_head": 32,
+  "n_head_kv": null,
   "n_inner": null,
   "n_layer": 24,
   "n_positions": 2048,
@@ -29,8 +33,8 @@
   "resid_pdrop": 0.0,
   "rotary_dim": 32,
   "tie_word_embeddings": false,
-  "torch_dtype": "…",
-  "transformers_version": "4.34.…",
+  "torch_dtype": "float16",
+  "transformers_version": "4.34.1",
   "use_cache": true,
   "vocab_size": 50304
 }
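The `auto_map` entries above point at remote code inside the repo, so downstream loading goes through `trust_remote_code`. A minimal loading sketch; the model id below is an assumption (this commit is on a fork, so substitute whichever repo carries it):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Any repo carrying this config.json resolves AutoConfig / AutoModelForCausalLM
# through its auto_map; the id here assumes the upstream microsoft/phi-1_5 layout.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/phi-1_5",        # assumed id, not stated in this commit
    torch_dtype=torch.float16,  # matches "torch_dtype": "float16" in the config
    trust_remote_code=True,     # required: PhiConfig/PhiForCausalLM live in the repo
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1_5")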
configuration_mixformer_sequential.py → configuration_phi.py
RENAMED
@@ -2,43 +2,43 @@
 # Licensed under the MIT license.
 
 import math
-from typing import …
+from typing import Optional
 
 from transformers import PretrainedConfig
 
 
-class MixFormerSequentialConfig(PretrainedConfig):
-    """…
-
-    model_type = "mixformer-sequential"
+class PhiConfig(PretrainedConfig):
+    """Phi configuration."""
 
+    model_type = "phi"
     attribute_map = {
         "max_position_embeddings": "n_positions",
         "hidden_size": "n_embd",
         "num_attention_heads": "n_head",
         "num_hidden_layers": "n_layer",
-        "input_emb_layer": "embd_layer",  # `input_emb_layer` key is for backward compatibility
-        "blocks": "architecture",  # `blocks` key is for backward compatibility
     }
 
     def __init__(
         self,
-        vocab_size: …
-        n_positions: …
-        n_embd: …
-        n_layer: …
+        vocab_size: int = 50304,
+        n_positions: int = 2048,
+        n_embd: int = 1024,
+        n_layer: int = 20,
         n_inner: Optional[int] = None,
-        n_head: …
+        n_head: int = 16,
+        n_head_kv: Optional[int] = None,
         rotary_dim: Optional[int] = 32,
         activation_function: Optional[str] = "gelu_new",
-        …
-        …
-        …
-        …
-        …
-        …
-        …
-        …
+        flash_attn: bool = False,
+        flash_rotary: bool = False,
+        fused_dense: bool = False,
+        attn_pdrop: float = 0.0,
+        embd_pdrop: float = 0.0,
+        resid_pdrop: float = 0.0,
+        layer_norm_epsilon: float = 1e-5,
+        initializer_range: float = 0.02,
+        tie_word_embeddings: bool = False,
+        pad_vocab_size_multiple: int = 64,
         **kwargs
     ) -> None:
         self.vocab_size = int(math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)
@@ -47,10 +47,13 @@ class MixFormerSequentialConfig(PretrainedConfig):
         self.n_layer = n_layer
         self.n_inner = n_inner
         self.n_head = n_head
+        self.n_head_kv = n_head_kv
         self.rotary_dim = min(rotary_dim, n_embd // n_head)
         self.activation_function = activation_function
-        self.…
-        self.…
+        self.flash_attn = flash_attn
+        self.flash_rotary = flash_rotary
+        self.fused_dense = fused_dense
+        self.attn_pdrop = attn_pdrop
         self.embd_pdrop = embd_pdrop
         self.resid_pdrop = resid_pdrop
         self.layer_norm_epsilon = layer_norm_epsilon
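One behavior worth noting in the `__init__` above: `vocab_size` is rounded up to the next multiple of `pad_vocab_size_multiple` before being stored. A standalone sketch of that arithmetic (the 50257 input is a hypothetical unpadded GPT-2-sized vocabulary, not a value from this diff):

import math

def pad_vocab(vocab_size: int, pad_vocab_size_multiple: int = 64) -> int:
    # Same expression as in PhiConfig.__init__: round up to the next multiple.
    return int(math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)

print(pad_vocab(50257))  # 50304 -- padded up to a multiple of 64
print(pad_vocab(50304))  # 50304 -- already a multiple of 64, unchanged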
modeling_mixformer_sequential.py
DELETED
@@ -1,778 +0,0 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

# BSD 3-Clause License
#
# Copyright (c) 2022, Tri Dao, [email protected].
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from __future__ import annotations

import math
import copy
from typing import Any, Dict, Optional, Tuple
from dataclasses import dataclass, field

import torch
import torch.nn as nn

from einops import rearrange
from transformers.activations import ACT2FN
from transformers import PretrainedConfig, PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast

from .configuration_mixformer_sequential import MixFormerSequentialConfig


@dataclass
class InferenceParams:
    """Inference parameters that are passed to the main model in order
    to efficiently calculate and store the context during inference.
    Adapted from https://github.com/Dao-AILab/flash-attention."""

    max_sequence_len: int
    max_batch_size: int
    sequence_len_offset: int = 0
    batch_size_offset: int = 0
    key_value_memory_dict: dict = field(default_factory=dict)
    fused_ft_kernel: bool = False
    lengths_per_sample: Optional[torch.Tensor] = None


class Embedding(nn.Module):
    """Token embedding with dropout."""

    def __init__(self, config: PretrainedConfig) -> None:
        super().__init__()

        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)

    def forward(self, input_ids: torch.LongTensor) -> torch.FloatTensor:
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.wte(input_ids)
        hidden_states = self.drop(hidden_states)

        return hidden_states


class RotaryEmbedding(nn.Module):
    """PyTorch implementation of `flash-attn` RotaryEmbedding layer.
    Adapted from https://github.com/Dao-AILab/flash-attention."""

    def __init__(
        self,
        dim: int,
        base: Optional[int] = 10000,
        scale_base: Optional[float] = None,
        device: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__()

        if scale_base is not None:
            raise NotImplementedError

        # Generate and save the inverse frequency buffer (non-trainable)
        self.dim = dim
        self.base = base
        self.scale_base = scale_base
        self.device = device

        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim))
        self.register_buffer("inv_freq", inv_freq)

        scale = (
            (torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim) / (1.4 * dim)
            if scale_base is not None
            else None
        )
        self.register_buffer("scale", scale)

        self._seq_len_cached = 0
        self._cos_cached = None
        self._sin_cached = None
        self._cos_k_cached = None
        self._sin_k_cached = None

    def _update_cos_sin_cache(self, x: torch.FloatTensor, seqlen_offset: Optional[int] = 0) -> None:
        # Reset the tables if the sequence length has changed,
        # or if we're on a new device (possibly due to tracing for instance)
        seqlen = x.shape[1] + seqlen_offset

        # Re-generate the inverse frequency buffer if it's not fp32
        # (for instance if model.half() was called)
        if self.inv_freq.dtype != "torch.float32":
            self.inv_freq = 1.0 / (
                self.base ** (torch.arange(0, self.dim, 2, device=self.device, dtype=torch.float32) / self.dim)
            )

        if seqlen > self._seq_len_cached or self._cos_cached.device != x.device or self._cos_cached.dtype != x.dtype:
            self._seq_len_cached = seqlen
            t = torch.arange(seqlen, device=x.device, dtype=torch.float32)

            # Don't do einsum, it converts fp32 to fp16
            # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
            freqs = torch.outer(t, self.inv_freq.to(device=t.device, dtype=torch.float32))
            if self.scale is None:
                self._cos_cached = torch.cos(freqs).to(x.dtype)
                self._sin_cached = torch.sin(freqs).to(x.dtype)
            else:
                power = (
                    torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device) - seqlen // 2
                ) / self.scale_base
                scale = self.scale.to(device=power.device) ** rearrange(power, "s -> s 1")

                # We want the multiplication by scale to happen in fp32
                self._cos_cached = (torch.cos(freqs) * scale).to(x.dtype)
                self._sin_cached = (torch.sin(freqs) * scale).to(x.dtype)
                self._cos_k_cached = (torch.cos(freqs) / scale).to(x.dtype)
                self._sin_k_cached = (torch.sin(freqs) / scale).to(x.dtype)

    def apply_rotary_emb_qkv(
        self,
        qkv: torch.FloatTensor,
        sin: torch.FloatTensor,
        cos: torch.FloatTensor,
        sin_k: Optional[torch.FloatTensor] = None,
        cos_k: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        _, seqlen, three, _, headdim = qkv.shape
        assert three == 3

        rotary_seqlen, rotary_dim = cos.shape
        rotary_dim *= 2
        assert rotary_dim <= headdim
        assert seqlen <= rotary_seqlen

        cos_k = cos if cos_k is None else cos_k
        sin_k = sin if sin_k is None else sin_k
        assert sin.shape == cos_k.shape == sin_k.shape == (rotary_seqlen, rotary_dim // 2)

        q_rot = qkv[:, :, 0, :, :rotary_dim]
        q_pass = qkv[:, :, 0, :, rotary_dim:]

        k_rot = qkv[:, :, 1, :, :rotary_dim]
        k_pass = qkv[:, :, 1, :, rotary_dim:]

        # Splits the queries and keys in half
        q1, q2 = q_rot.chunk(2, dim=-1)
        k1, k2 = k_rot.chunk(2, dim=-1)
        c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d")

        # Casts to fp32 are necessary to prevent fp16 overflow issues
        q1, q2, k1, k2, c, s = [t.to(dtype=torch.float32) for t in [q1, q2, k1, k2, c, s]]

        # Computes the new keys and queries, recasting to original dtype
        q_rot = torch.cat([q1 * c - q2 * s, q1 * s + q2 * c], axis=-1).to(qkv.dtype)

        k_rot = torch.cat([k1 * c - k2 * s, k1 * s + k2 * c], axis=-1).to(qkv.dtype)

        return torch.cat(
            [
                torch.cat([q_rot, q_pass], axis=-1).unsqueeze(2),
                torch.cat([k_rot, k_pass], axis=-1).unsqueeze(2),
                qkv[:, :, 2:3, :, :],
            ],
            axis=2,
        )

    def forward(self, qkv: torch.Tensor, seqlen_offset: int = 0) -> Tuple[torch.Tensor, torch.Tensor]:
        """Perform the forward pass.

        Args:
            qkv: Query, key and value tensors of shape (batch, seqlen, nheads, headdim) or (batch, seqlen, 3, nheads, headdim).
            seqlen_offset: Used in generation where the passed `qkv` is only the last token in the batch.

        Returns:
            New `qkv` and the cached sinusoids.

        """

        self._update_cos_sin_cache(qkv, seqlen_offset)

        return self.apply_rotary_emb_qkv(qkv, self._sin_cached[seqlen_offset:], self._cos_cached[seqlen_offset:])


def _update_kv_cache(kv, inference_params, layer_idx):
    """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)
    Adapted from https://github.com/Dao-AILab/flash-attention."""
    # Pre-allocate memory for key-values for inference.
    num_heads, head_dim = kv.shape[-2:]
    if layer_idx not in inference_params.key_value_memory_dict:
        kv_cache = torch.empty(
            inference_params.max_batch_size, inference_params.max_sequence_len, 2,
            num_heads, head_dim, dtype=kv.dtype, device=kv.device
        )
        inference_params.key_value_memory_dict[layer_idx] = kv_cache
    else:
        kv_cache = inference_params.key_value_memory_dict[layer_idx]

    # Adjust key and value for inference
    batch_start = inference_params.batch_size_offset
    batch_end = batch_start + kv.shape[0]
    sequence_start = inference_params.sequence_len_offset
    sequence_end = sequence_start + kv.shape[1]
    assert batch_end <= (kv_cache.shape[0] if kv_cache is not None else v_cache.shape[0])
    assert sequence_end <= (kv_cache.shape[1] if kv_cache is not None else v_cache.shape[2])

    assert kv_cache is not None
    kv_cache[batch_start:batch_end, sequence_start:sequence_end, ...] = kv
    kv = kv_cache[batch_start:batch_end, :sequence_end, ...]
    return kv


class MLP(nn.Module):
    """Multi-Layer Perceptron.

    Reference:
        Attention Is All You Need.
        https://arxiv.org/pdf/1706.03762.pdf.

    """

    def __init__(self, config: PretrainedConfig, n_inner: Optional[int] = None, act_fn: Optional[str] = None) -> None:
        super().__init__()

        act_fn = config.activation_function if act_fn is None else act_fn
        assert act_fn in ACT2FN.keys(), f"`act_fn` must be one of: {ACT2FN.keys()}."

        n_inner = getattr(config, "n_inner", None) if n_inner is None else n_inner
        n_inner = n_inner if n_inner is not None else 4 * config.n_embd

        self.fc1 = nn.Linear(config.n_embd, n_inner)
        self.fc2 = nn.Linear(n_inner, config.n_embd)
        self.act = ACT2FN[act_fn]

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        old_keys = [prefix + "fc_in.weight", prefix + "fc_out.weight", prefix + "fc_in.bias", prefix + "fc_out.bias"]
        new_keys = [prefix + "fc1.weight", prefix + "fc2.weight", prefix + "fc1.bias", prefix + "fc2.bias"]

        if all(k in state_dict for k in old_keys) and not all(k in state_dict for k in new_keys):
            # Older version of `MLP` saved with different key names.
            for old_key, new_key in zip(old_keys, new_keys):
                state_dict[new_key] = state_dict.pop(old_key)

        return super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc2(hidden_states)

        return hidden_states


class FusedMLP(nn.Module):
    """Fused Multi-Layer Perceptron from `flash-attn`.

    Reference:
        https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/ops/fused_dense.py.

    """
    def __init__(self, config: PretrainedConfig, n_inner: Optional[int] = None, act_fn: Optional[str] = None,
                 raise_on_missing: bool = False) -> None:
        super().__init__()

        act_fn = config.activation_function if act_fn is None else act_fn
        assert act_fn in ACT2FN.keys(), f"`act_fn` must be one of: {ACT2FN.keys()}."

        n_inner = getattr(config, "n_inner", None) if n_inner is None else n_inner
        n_inner = n_inner if n_inner is not None else 4 * config.n_embd

        gelu_activations = ["gelu_new", "gelu_fast", "gelu_approx"]
        activation = "gelu_approx" if act_fn in gelu_activations else "relu"

        self.mlp = MLP(config, n_inner=n_inner, act_fn=act_fn)

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        return self.mlp(hidden_states)


class SelfAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Adapted from https://github.com/Dao-AILab/flash-attention.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
                      runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """
    def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0):
        super().__init__()
        self.causal = causal
        self.softmax_scale = softmax_scale
        self.drop = nn.Dropout(attention_dropout)

    def forward(self, qkv, causal=None, key_padding_mask=None):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            qkv: The tensor containing the query, key, and value. (B, S, 3, H, D)
            causal: if passed, will override self.causal
            key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
                False means to mask out. (B, S)
        """
        batch_size, seqlen = qkv.shape[0], qkv.shape[1]
        causal = self.causal if causal is None else causal
        q, k, v = qkv.unbind(dim=2)
        softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
        scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
        if key_padding_mask is not None:
            padding_mask = torch.full((batch_size, seqlen), -10000.0, dtype=scores.dtype,
                                      device=scores.device)
            padding_mask.masked_fill_(key_padding_mask, 0.0)
            # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
            scores = scores + rearrange(padding_mask, 'b s -> b 1 1 s')
        if causal:
            # "triu_tril_cuda_template" not implemented for 'BFloat16'
            # So we have to construct the mask in float
            causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
            # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
            scores = scores + causal_mask.to(dtype=scores.dtype)
        attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
        attention_drop = self.drop(attention)
        output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
        return output


class CrossAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Adapted from https://github.com/Dao-AILab/flash-attention.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                      (default: 1/sqrt(d_keys) where d_keys is computed at
                      runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """
    def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0):
        super().__init__()
        self.causal = causal
        self.softmax_scale = softmax_scale
        self.drop = nn.Dropout(attention_dropout)

    def forward(self, q, kv, causal=None, key_padding_mask=None):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            q: The tensor containing the query. (B, Sq, H, D)
            kv: The tensor containing the key and value. (B, Sk, 2, H, D)
            causal: if passed, will override self.causal
            key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
                False means to mask out. (B, Sk)
        """
        batch_size, seqlen_q = q.shape[0], q.shape[1]
        causal = self.causal if causal is None else causal
        seqlen_k = kv.shape[1]
        assert kv.shape[0] == batch_size and kv.shape[3] == q.shape[2] and kv.shape[4] == q.shape[3]
        k, v = kv.unbind(dim=2)
        softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
        scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
        if key_padding_mask is not None:
            padding_mask = torch.full((batch_size, seqlen_k), -10000.0, dtype=scores.dtype,
                                      device=scores.device)
            padding_mask.masked_fill_(key_padding_mask, 0.0)
            # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
            scores = scores + rearrange(padding_mask, 'b s -> b 1 1 s')
        if causal:
            # "triu_tril_cuda_template" not implemented for 'BFloat16'
            # So we have to construct the mask in float
            causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0,
                                                device=scores.device), 1)
            # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
            scores = scores + causal_mask.to(dtype=scores.dtype)
        attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
        attention_drop = self.drop(attention)
        output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
        return output


def find_mha_dims(
    config: PretrainedConfig, n_head: Optional[int] = None, head_dim: Optional[int] = None
) -> Tuple[int, int]:
    """Validate and return the number of heads and head dimension for multi-head attention.

    Args:
        config: Model configuration.
        n_head: Number of heads.
        head_dim: Head dimension.

    Returns:
        Number of heads and head dimension.

    """

    assert all(
        hasattr(config, attr) for attr in ["n_embd", "n_head"]
    ), "`config` must have `n_embd` and `n_head` attributes."

    if head_dim is None:
        assert (
            config.n_embd % config.n_head == 0
        ), f"Hidden size ({config.n_embd}) must be divisible by the number of heads ({config.n_head})."

    if n_head is None and head_dim is None:
        head_dim = config.n_embd // config.n_head
        n_head = config.n_head
    elif n_head is None or head_dim is None:
        raise ValueError("`n_head` and `head_dim` must be both specified or `None`.")

    return n_head, head_dim


class MHA(nn.Module):
    """Multi-head attention layer.
    Adapted from https://github.com/Dao-AILab/flash-attention."""

    def __init__(
        self,
        config: PretrainedConfig,
        rotary_dim: Optional[int] = None,
        n_head: Optional[int] = None,
        head_dim: Optional[int] = None,
        bias: Optional[bool] = True,
        dropout: Optional[float] = 0.0,
        softmax_scale: Optional[float] = None,
        causal: Optional[bool] = True,
        layer_idx: Optional[int] = None,
        rotary_emb_scale_base: Optional[float] = None,
        return_residual: Optional[bool] = False,
        checkpointing: Optional[bool] = False,
        device: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        fused_dense: Optional[bool] = True,
        flash_attn: Optional[bool] = True,
        cutlass_attn: Optional[bool] = False,
        flash_rotary: Optional[bool] = True,
        raise_on_missing: Optional[bool] = False
    ) -> None:
        super().__init__()

        factory_kwargs = {"device": device, "dtype": dtype}
        n_head, head_dim = find_mha_dims(config, n_head, head_dim)

        self.hidden_size = config.n_embd
        self.n_head = n_head
        self.head_dim = head_dim
        self.op_size = n_head * head_dim

        self.causal = causal
        self.layer_idx = layer_idx
        self.rotary_emb_dim = rotary_dim if rotary_dim is not None else getattr(config, "rotary_dim", 0)
        self.fused_dense = fused_dense
        self.flash_attn = flash_attn
        self.cutlass_attn = cutlass_attn
        self.flash_rotary = flash_rotary
        self.return_residual = return_residual
        self.checkpointing = checkpointing

        if self.rotary_emb_dim > 0:
            rotary_kwargs = {"device": device}
            if rotary_emb_scale_base is not None and rotary_emb_scale_base > 0.0:
                rotary_kwargs["scale_base"] = rotary_emb_scale_base

            self.rotary_emb = RotaryEmbedding(self.rotary_emb_dim, **rotary_kwargs)
        else:
            pass

        self.Wqkv = nn.Linear(self.hidden_size, 3 * self.op_size, bias=bias, **factory_kwargs)
        self.out_proj = nn.Linear(self.op_size, self.hidden_size, bias=bias, **factory_kwargs)

        self.inner_attn = SelfAttention(causal=causal, softmax_scale=softmax_scale, attention_dropout=dropout)
        self.inner_cross_attn = CrossAttention(causal=causal, softmax_scale=softmax_scale, attention_dropout=dropout)

    def _update_kv_cache(self, kv: torch.FloatTensor, inference_params: InferenceParams) -> None:
        """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)
        Adapted from https://github.com/Dao-AILab/flash-attention."""

        assert self.layer_idx is not None, "Generation requires layer_idx in the constructor"

        return _update_kv_cache(kv, inference_params, self.layer_idx)

    def forward(
        self,
        x: torch.FloatTensor,
        x_kv: Optional[torch.FloatTensor] = None,
        key_padding_mask: Optional[torch.BoolTensor] = None,
        cu_seqlens: Optional[torch.LongTensor] = None,
        max_seqlen: Optional[int] = None,
        mixer_subset: Optional[torch.LongTensor] = None,
        past_cache: Optional[InferenceParams] = None,
        **kwargs
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        """Perform the forward pass.

        Args:
            x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if
                cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total
                is the sum of the sequence lengths in the batch.
            x_kv: (batch, seqlen, hidden_dim), only applicable for cross-attention. If None, use x.
            key_padding_mask: boolean mask, True means to keep, False means to mask out.
                (batch, seqlen). Only applicable when not using FlashAttention.
            cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
                of the sequences in the batch, used to index into x. Only applicable when using
                FlashAttention.
            max_seqlen: int. Maximum sequence length in the batch.
            mixer_subset: for cross-attention only. If not None, will take a subset of x
                before applying the query projection. Useful for e.g., ViT where we only care
                about the CLS token in the last layer.
            past_cache: For generation only.

        Returns:
            (batch, seqlen, hidden_dim) if cu_seqlens is None and max_seqlen is None,
            else (total, hidden_dim) where total is the sum of the sequence lengths
            in the batch.

        """

        if cu_seqlens is not None:
            assert max_seqlen is not None
            assert key_padding_mask is None
            assert self.flash_attn
            assert self.rotary_emb_dim == 0

        if key_padding_mask is not None:
            assert cu_seqlens is None
            assert max_seqlen is None
            assert not self.flash_attn

        if past_cache is not None:
            assert key_padding_mask is None
            assert cu_seqlens is None and max_seqlen is None

        attn_kwargs = {"key_padding_mask": key_padding_mask}

        assert x_kv is None and mixer_subset is None

        qkv = self.Wqkv(x)
        qkv = rearrange(qkv, "... (three h d) -> ... three h d", three=3, d=self.head_dim)

        if past_cache is None:
            if self.rotary_emb_dim > 0:
                qkv = self.rotary_emb(qkv)
            context = self.inner_attn(qkv, **attn_kwargs)

        else:
            if self.rotary_emb_dim > 0:
                qkv = self.rotary_emb(qkv, seqlen_offset=past_cache.sequence_len_offset)
            q = qkv[:, :, 0]
            kv = self._update_kv_cache(qkv[:, :, 1:], past_cache)
            # If we're processing the prompt, causal=None (use self.causal).
            # If we're decoding, then causal=False.
            causal = None if past_cache.sequence_len_offset == 0 else False
            context = self.inner_cross_attn(q, kv, causal=causal)

        out = rearrange(context, "... h d -> ... (h d)")
        out = self.out_proj(out)

        return out if not self.return_residual else (out, x)


class ParallelBlock(nn.Module):
    """Parallel block.

    This block applies parallel mixer and MLP layers to the input (used in GPT-J and CodeGen).

    """

    def __init__(
        self,
        config: PretrainedConfig,
        mixer: Optional[Dict[str, Any]] = None,
        mlp: Optional[Dict[str, Any]] = None,
        block_idx: Optional[int] = None,
    ) -> None:
        super().__init__()

        self.ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.block_idx = block_idx

        self.mixer = MHA(config=config, **mixer, layer_idx=block_idx)
        mlp_cls = mlp.pop('mlp_cls')
        if mlp_cls == 'fused_mlp':
            self.mlp = FusedMLP(config=config, **mlp)
        else:
            self.mlp = MLP(config=config, **mlp)

    def forward(self, hidden_states: torch.FloatTensor,
                past_cache: Optional[torch.FloatTensor] = None) -> torch.FloatTensor:
        residual = hidden_states
        hidden_states = self.ln(hidden_states)

        attn_outputs = self.mixer(hidden_states, past_cache=past_cache)
        if isinstance(attn_outputs, tuple):
            attn_outputs = attn_outputs[0]

        attn_outputs = self.resid_dropout(attn_outputs)
        feed_forward_hidden_states = self.resid_dropout(self.mlp(hidden_states))

        hidden_states = attn_outputs + feed_forward_hidden_states + residual

        return hidden_states


class CausalLMHead(nn.Module):
    """Causal Language Modeling head.

    Reference:
        Improving Language Understanding by Generative Pre-Training.
        https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf.

    """

    def __init__(self, config: PretrainedConfig) -> None:
        super().__init__()

        self.ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.linear = nn.Linear(config.n_embd, config.vocab_size)

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states = self.ln(hidden_states)
        logits = self.linear(hidden_states).to(torch.float32)

        return logits


class CausalLMLoss(nn.Module):
    """Causal Language Modeling loss.

    Reference:
        Improving Language Understanding by Generative Pre-Training.
        https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf.

    """

    def __init__(self, shift_labels: Optional[bool] = True) -> None:
        super().__init__()

        self.shift_labels = shift_labels
        self.loss_fct = nn.CrossEntropyLoss()

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor) -> torch.FloatTensor:
        if self.shift_labels:
            logits = logits[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()

        loss = self.loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))

        return loss


class MixFormerSequentialPreTrainedModel(PreTrainedModel):
    """MixFormer (sequential for DeepSpeed) pre-trained model."""

    config_class = MixFormerSequentialConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True

    def __init__(self, *inputs, **kwargs) -> None:
        super().__init__(*inputs, **kwargs)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs) -> Dict[str, Any]:
        if "use_cache" in kwargs and not kwargs["use_cache"]:
            return {"input_ids": input_ids}

        if past_key_values is None or not (isinstance(past_key_values, InferenceParams)):
            past_key_values = InferenceParams(
                max_batch_size=input_ids.shape[0],
                max_sequence_len=self.config.n_positions,
                sequence_len_offset=0,
                batch_size_offset=0,
                fused_ft_kernel=False,
                key_value_memory_dict={},
            )
        else:
            # assume past_key_values has cached all but last token in input_ids
            past_key_values.sequence_len_offset = len(input_ids[0]) - 1
            input_ids = input_ids[:, -1].unsqueeze(-1)

        return {"input_ids": input_ids, "past_key_values": past_key_values, **kwargs}


class MixFormerSequentialForCausalLM(MixFormerSequentialPreTrainedModel):
    """MixFormer (sequential for DeepSpeed) for Causal Language Modeling."""

    _keys_to_ignore_on_load_missing = [""]
    _keys_to_ignore_on_load_unexpected = [r"layers\.\d+\.mlp.(fc_in|fc_out)\.(weight|bias)"]

    def __init__(self, config: MixFormerSequentialConfig) -> None:
        super().__init__(config)

        modules = [Embedding(config)]
        block_config = config.architecture

        if not isinstance(block_config, list):
            block_config = [block_config for _ in range(config.n_layer)]

        if config.n_layer != len(block_config):
            config.n_layer = len(block_config)

        for block_idx, block in enumerate(block_config):
            # `block_cls` with `legacy` value is for backward compatibility
            # `path` key is for backward compatibility
            block = copy.deepcopy(block) or {"block_cls": "parallel"}
            block_cls = block.pop("path", None) or block.pop("block_cls", None)

            block["block_idx"] = block_idx
            modules.append(ParallelBlock(config, **block))

        modules.append(CausalLMHead(config))

        self.layers = nn.Sequential(*modules)
        self.loss = CausalLMLoss()

        self.post_init()

    def get_input_embeddings(self) -> nn.Embedding:
        return self.layers[0].wte

    def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None:
        self.layers[0].wte = new_embeddings

    def get_output_embeddings(self) -> nn.Linear:
        return self.layers[-1].linear

    def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
        self.layers[-1].linear = new_embeddings

    def forward(
        self, input_ids: torch.LongTensor, labels: Optional[torch.LongTensor] = None,
        past_key_values: Optional[torch.FloatTensor] = None, **kwargs
    ) -> CausalLMOutputWithPast:

        if not past_key_values:
            lm_logits = self.layers(input_ids)
        else:
            hidden_layer = self.layers[0](input_ids)
            for module in self.layers[1:-1]:
                hidden_layer = module(hidden_layer, past_cache=past_key_values)
            lm_logits = self.layers[-1](hidden_layer)

        loss = None
        if labels is not None:
            loss = self.loss(lm_logits, labels)

        return CausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=past_key_values)
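The label shift inside `CausalLMLoss.forward` above is easy to get wrong when reimplementing: position t is trained to predict token t+1, so the last logit and the first label are dropped. A toy-sized sketch of just that shift (all shapes are arbitrary values, not from this repo):

import torch
import torch.nn as nn

logits = torch.randn(2, 5, 11)          # (batch, seqlen, vocab)
labels = torch.randint(0, 11, (2, 5))   # (batch, seqlen)

# Same shift as CausalLMLoss with shift_labels=True.
shift_logits = logits[..., :-1, :].contiguous()  # (2, 4, 11)
shift_labels = labels[..., 1:].contiguous()      # (2, 4)

loss = nn.CrossEntropyLoss()(shift_logits.view(-1, 11), shift_labels.view(-1))
print(loss)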
modeling_phi.py
ADDED
@@ -0,0 +1,961 @@
1 |
+
# Copyright (c) Microsoft Corporation.
|
2 |
+
# Licensed under the MIT license.
|
3 |
+
#
|
4 |
+
# Copyright (c) 2022, Tri Dao, [email protected].
|
5 |
+
# Licensed under the BSD 3-Clause License.
|
6 |
+
|
7 |
+
from __future__ import annotations
|
8 |
+
|
9 |
+
import math
|
10 |
+
from dataclasses import dataclass, field
|
11 |
+
from typing import Any, Dict, Optional, Tuple, Union
|
12 |
+
|
13 |
+
import torch
|
14 |
+
import torch.nn as nn
|
15 |
+
from einops import rearrange, repeat
|
16 |
+
from transformers import PretrainedConfig, PreTrainedModel
|
17 |
+
from transformers.activations import ACT2FN
|
18 |
+
from transformers.modeling_outputs import CausalLMOutputWithPast
|
19 |
+
|
20 |
+
from .configuration_phi import PhiConfig
|
21 |
+
|
22 |
+
try:
|
23 |
+
from flash_attn.bert_padding import pad_input, unpad_input
|
24 |
+
from flash_attn.layers.rotary import RotaryEmbedding as FlashRotaryEmbedding
|
25 |
+
from flash_attn.modules.mha import FlashCrossAttention, FlashSelfAttention
|
26 |
+
from flash_attn.ops.fused_dense import FusedDense
|
27 |
+
except:
|
28 |
+
pad_input, unpad_input = None, None
|
29 |
+
FlashRotaryEmbedding = None
|
30 |
+
FlashSelfAttention, FlashCrossAttention = None, None
|
31 |
+
FusedDense = None
|
32 |
+
|
33 |
+
|
34 |
+
@dataclass
|
35 |
+
class InferenceParams:
|
36 |
+
"""Inference parameters passed to model to efficiently calculate
|
37 |
+
and store context during inference.
|
38 |
+
|
39 |
+
Reference:
|
40 |
+
https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/utils/generation.py.
|
41 |
+
|
42 |
+
Args:
|
43 |
+
max_seqlen: Maximum sequence length.
|
44 |
+
max_batch_size: Maximum batch size.
|
45 |
+
seqlen_offset: Sequence length offset.
|
46 |
+
batch_size_offset: Batch size offset.
|
47 |
+
key_value_memory_dict: Key value memory dictionary.
|
48 |
+
lengths_per_sample: Lengths per sample.
|
49 |
+
|
50 |
+
"""
|
51 |
+
|
52 |
+
max_seqlen: int = field(metadata={"help": "Maximum sequence length."})
|
53 |
+
|
54 |
+
max_batch_size: int = field(metadata={"help": "Maximum batch size."})
|
55 |
+
|
56 |
+
seqlen_offset: int = field(default=0, metadata={"help": "Sequence length offset."})
|
57 |
+
|
58 |
+
batch_size_offset: int = field(default=0, metadata={"help": "Batch size offset."})
|
59 |
+
|
60 |
+
key_value_memory_dict: Dict[str, Any] = field(
|
61 |
+
default_factory=dict, metadata={"help": "Key value memory dictionary."}
|
62 |
+
)
|
63 |
+
|
64 |
+
lengths_per_sample: torch.Tensor = field(default=None, metadata={"help": "Lengths per sample."})
|
65 |
+
|
66 |
+
|
67 |
+
class Embedding(nn.Module):
|
68 |
+
"""Token embedding with dropout."""
|
69 |
+
|
70 |
+
def __init__(self, config: PretrainedConfig) -> None:
|
71 |
+
super().__init__()
|
72 |
+
|
73 |
+
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
|
74 |
+
self.drop = nn.Dropout(config.embd_pdrop)
|
75 |
+
|
76 |
+
def forward(self, input_ids: torch.LongTensor) -> torch.FloatTensor:
|
77 |
+
input_shape = input_ids.size()
|
78 |
+
input_ids = input_ids.view(-1, input_shape[-1])
|
79 |
+
|
80 |
+
hidden_states = self.wte(input_ids)
|
81 |
+
hidden_states = self.drop(hidden_states)
|
82 |
+
|
83 |
+
return hidden_states
|
84 |
+
|
85 |
+
|
86 |
+
def _apply_rotary_emb(
|
87 |
+
x: torch.FloatTensor,
|
88 |
+
cos: torch.FloatTensor,
|
89 |
+
sin: torch.FloatTensor,
|
90 |
+
) -> torch.FloatTensor:
|
91 |
+
_, seqlen, _, _ = x.shape
|
92 |
+
_, rotary_dim = cos.shape
|
93 |
+
rotary_dim *= 2
|
94 |
+
|
95 |
+
x_rot = x[:, :, :, :rotary_dim]
|
96 |
+
x_pass = x[:, :, :, rotary_dim:]
|
97 |
+
|
98 |
+
x1, x2 = x_rot.chunk(2, dim=-1)
|
99 |
+
c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d")
|
100 |
+
x1, x2, c, s = [t.to(dtype=torch.float32) for t in [x1, x2, c, s]]
|
101 |
+
|
102 |
+
x_rot = torch.cat([x1 * c - x2 * s, x1 * s + x2 * c], axis=-1).to(x.dtype)
|
103 |
+
|
104 |
+
return torch.cat([x_rot, x_pass], axis=-1)
|
105 |
+
|
106 |
+
|
107 |
+
def _apply_rotary_emb_kv(
|
108 |
+
kv: torch.FloatTensor,
|
109 |
+
cos: torch.FloatTensor,
|
110 |
+
sin: torch.FloatTensor,
|
111 |
+
cos_k: Optional[torch.FloatTensor] = None,
|
112 |
+
sin_k: Optional[torch.FloatTensor] = None,
|
113 |
+
) -> torch.FloatTensor:
|
114 |
+
_, seqlen, _, _, _ = kv.shape
|
115 |
+
_, rotary_dim = cos.shape
|
116 |
+
rotary_dim *= 2
|
117 |
+
|
118 |
+
k_rot = kv[:, :, 0, :, :rotary_dim]
|
119 |
+
k_pass = kv[:, :, 0, :, rotary_dim:]
|
120 |
+
|
121 |
+
k1, k2 = k_rot.chunk(2, dim=-1)
|
122 |
+
c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d")
|
123 |
+
k1, k2, c, s = [t.to(dtype=torch.float32) for t in [k1, k2, c, s]]
|
124 |
+
|
125 |
+
k_rot = torch.cat([k1 * c - k2 * s, k1 * s + k2 * c], axis=-1).to(kv.dtype)
|
126 |
+
|
127 |
+
return torch.cat(
|
128 |
+
[
|
129 |
+
torch.cat([k_rot, k_pass], axis=-1).unsqueeze(2),
|
130 |
+
kv[:, :, 1:2, :, :],
|
131 |
+
],
|
132 |
+
axis=2,
|
133 |
+
)
|
134 |
+
|
135 |
+
|
136 |
+
def _apply_rotary_emb_qkv(
|
137 |
+
qkv: torch.FloatTensor,
|
138 |
+
cos: torch.FloatTensor,
|
139 |
+
sin: torch.FloatTensor,
|
140 |
+
cos_k: Optional[torch.FloatTensor] = None,
|
141 |
+
sin_k: Optional[torch.FloatTensor] = None,
|
142 |
+
) -> torch.FloatTensor:
|
143 |
+
_, seqlen, _, _, _ = qkv.shape
|
144 |
+
_, rotary_dim = cos.shape
|
145 |
+
rotary_dim *= 2
|
146 |
+
|
147 |
+
q_rot = qkv[:, :, 0, :, :rotary_dim]
|
148 |
+
q_pass = qkv[:, :, 0, :, rotary_dim:]
|
149 |
+
|
150 |
+
k_rot = qkv[:, :, 1, :, :rotary_dim]
|
151 |
+
k_pass = qkv[:, :, 1, :, rotary_dim:]
|
152 |
+
|
153 |
+
q1, q2 = q_rot.chunk(2, dim=-1)
|
154 |
+
k1, k2 = k_rot.chunk(2, dim=-1)
|
155 |
+
c, s = rearrange(cos[:seqlen], "s d -> s 1 d"), rearrange(sin[:seqlen], "s d -> s 1 d")
|
156 |
+
q1, q2, k1, k2, c, s = [t.to(dtype=torch.float32) for t in [q1, q2, k1, k2, c, s]]
|
157 |
+
|
158 |
+
q_rot = torch.cat([q1 * c - q2 * s, q1 * s + q2 * c], axis=-1).to(qkv.dtype)
|
159 |
+
k_rot = torch.cat([k1 * c - k2 * s, k1 * s + k2 * c], axis=-1).to(qkv.dtype)
|
160 |
+
|
161 |
+
return torch.cat(
|
162 |
+
[
|
163 |
+
torch.cat([q_rot, q_pass], axis=-1).unsqueeze(2),
|
164 |
+
torch.cat([k_rot, k_pass], axis=-1).unsqueeze(2),
|
165 |
+
qkv[:, :, 2:3, :, :],
|
166 |
+
],
|
167 |
+
axis=2,
|
168 |
+
)
|
169 |
+
|
170 |
+
|
171 |
+
class RotaryEmbedding(nn.Module):
    """Rotary positional embedding (RoPE).

    Reference:
        RoFormer: Enhanced Transformer with Rotary Position Embedding.
        https://arxiv.org/pdf/2104.09864.pdf.

    """

    def __init__(
        self,
        dim: int,
        base: int = 10000,
        scale_base: Optional[float] = None,
        pos_idx_in_fp32: bool = True,
        max_position_embeddings: int = 2048,
        device: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__()

        if scale_base is not None:
            raise NotImplementedError

        self.dim = dim
        self.base = float(base)
        self.scale_base = scale_base
        self.pos_idx_in_fp32 = pos_idx_in_fp32
        self.max_position_embeddings = max_position_embeddings
        self.device = device

        # Generate and save the inverse frequency buffer (non-trainable)
        inv_freq = self._compute_inv_freq(device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Generate and save the scale buffer (non-trainable)
        scale = (
            (torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim) / (1.4 * dim)
            if scale_base is not None
            else None
        )
        self.register_buffer("scale", scale, persistent=False)

        # Initialize cached attributes since ONNX can't rely on dynamic initialization
        self._update_cos_sin_cache(max_position_embeddings, device=device, dtype=torch.float32)

    def _compute_inv_freq(self, device: Optional[str] = None) -> torch.FloatTensor:
        return 1.0 / (self.base ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim))

    def _update_cos_sin_cache(
        self,
        seqlen: int,
        device: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
    ) -> None:
        self._seq_len_cached = seqlen

        # fp32 is preferred since the output of `torch.arange` can be quite large
        # and bf16 would lose a lot of precision
        if self.pos_idx_in_fp32:
            t = torch.arange(seqlen, device=device, dtype=torch.float32)
            if self.inv_freq.dtype != torch.float32:
                inv_freq = self._compute_inv_freq(device=device)
            else:
                inv_freq = self.inv_freq
        else:
            t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
            inv_freq = self.inv_freq

        # `torch.outer` is preferred since `torch.einsum` converts from fp32 to fp16 if used with AMP
        freqs = torch.outer(t, inv_freq)
        if self.scale is None:
            self._cos_cached = torch.cos(freqs).to(dtype)
            self._sin_cached = torch.sin(freqs).to(dtype)
        else:
            power = (
                torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device) - seqlen // 2
            ) / self.scale_base
            scale = self.scale.to(device=power.device) ** rearrange(power, "s -> s 1")

            # Force the scale multiplication to happen in fp32
            self._cos_cached = (torch.cos(freqs) * scale).to(dtype)
            self._sin_cached = (torch.sin(freqs) * scale).to(dtype)
            self._cos_k_cached = (torch.cos(freqs) / scale).to(dtype)
            self._sin_k_cached = (torch.sin(freqs) / scale).to(dtype)

    def forward(
        self,
        qkv: torch.Tensor,
        kv: Optional[torch.Tensor] = None,
        seqlen_offset: int = 0,
        **kwargs,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        seq_start = seqlen_offset
        seq_end = seq_start + qkv.shape[1]

        if (
            self._cos_cached.device != qkv.device
            or self._cos_cached.dtype != qkv.dtype
            or (self.training and self._cos_cached.is_inference())
        ):
            self._update_cos_sin_cache(self.max_position_embeddings, device=qkv.device, dtype=qkv.dtype)

        if kv is None:
            return _apply_rotary_emb_qkv(
                qkv,
                self._cos_cached[seq_start:seq_end],
                self._sin_cached[seq_start:seq_end],
            )
        else:
            q = _apply_rotary_emb(
                qkv,
                self._cos_cached[seq_start:seq_end],
                self._sin_cached[seq_start:seq_end],
            )
            kv = _apply_rotary_emb_kv(
                kv,
                self._cos_cached[seq_start:seq_end],
                self._sin_cached[seq_start:seq_end],
            )

            return q, kv

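# A minimal usage sketch with assumed shapes (illustrative only, not part of
# the upstream file):
#
#     rope = RotaryEmbedding(dim=32, max_position_embeddings=2048)
#     qkv = torch.randn(2, 128, 3, 32, 64)  # (batch, seqlen, 3, n_head, head_dim)
#     qkv = rope(qkv)                       # rotates the first 32 of 64 head dims of q and k
#
# During incremental decoding, `seqlen_offset` shifts the cos/sin lookup so a
# single new token is rotated as if it sat at its absolute position.
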
class MLP(nn.Module):
    """Multi-Layer Perceptron.

    Reference:
        Attention Is All You Need.
        https://arxiv.org/pdf/1706.03762.pdf.

    """

    def __init__(
        self,
        config: PretrainedConfig,
        n_inner: Optional[int] = None,
        act_fn: Optional[str] = None,
    ) -> None:
        super().__init__()

        act_fn = config.activation_function if act_fn is None else act_fn

        n_inner = getattr(config, "n_inner", None) if n_inner is None else n_inner
        n_inner = n_inner if n_inner is not None else 4 * config.n_embd

        self.fc1 = nn.Linear(config.n_embd, n_inner)
        self.fc2 = nn.Linear(n_inner, config.n_embd)
        self.act = ACT2FN[act_fn]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc2(hidden_states)

        return hidden_states

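# Worked dimensions for this checkpoint, per the config.json in this commit
# (n_embd=2048, n_inner=null, so n_inner falls back to 4 * n_embd), assuming
# the default gelu_new activation:
#
#     fc1: 2048 -> 8192, activation, fc2: 8192 -> 2048
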
class SelfAttention(nn.Module):
    """Self-attention layer (compatible with PyTorch).

    Reference:
        https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/mha.py.

    """

    def __init__(
        self,
        causal: bool = True,
        softmax_scale: Optional[float] = None,
        attention_dropout: float = 0.0,
    ) -> None:
        super().__init__()

        self.causal = causal
        self.softmax_scale = softmax_scale
        self.drop = nn.Dropout(attention_dropout)

    @torch.autocast("cpu", enabled=False)
    @torch.autocast("cuda", enabled=False)
    def forward(
        self,
        qkv: torch.FloatTensor,
        causal: Optional[bool] = None,
        key_padding_mask: Optional[torch.BoolTensor] = None,
        **kwargs,
    ) -> torch.FloatTensor:
        batch_size, seqlen = qkv.shape[0], qkv.shape[1]
        q, k, v = qkv.unbind(dim=2)

        q = q.to(torch.float32)
        k = k.to(torch.float32)

        causal = self.causal if causal is None else causal
        softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])

        # Autocast is manually disabled to avoid `torch.einsum` performing the operation
        # using float16, which might lead to overflow
        scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)

        if key_padding_mask is not None:
            padding_mask = torch.full((batch_size, seqlen), -10000.0, dtype=scores.dtype, device=scores.device)
            padding_mask.masked_fill_(key_padding_mask, 0.0)

            scores = scores + rearrange(padding_mask, "b s -> b 1 1 s")

        if causal:
            causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
            scores = scores + causal_mask.to(dtype=scores.dtype)

        attention = torch.softmax(scores, dim=-1).to(v.dtype)
        attention = self.drop(attention)

        output = torch.einsum("bhts,bshd->bthd", attention, v)

        return output

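# The masked paths above implement the standard scaled dot-product attention,
#
#     softmax(Q @ K^T / sqrt(head_dim) + mask) @ V,
#
# with -10000.0 serving as the additive "minus infinity" for masked positions
# so that the softmax assigns them near-zero weight.
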
class CrossAttention(nn.Module):
    """Cross-attention layer (compatible with PyTorch).

    Reference:
        https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/modules/mha.py.

    """

    def __init__(
        self,
        causal: bool = True,
        softmax_scale: Optional[float] = None,
        attention_dropout: float = 0.0,
    ) -> None:
        super().__init__()

        self.causal = causal
        self.softmax_scale = softmax_scale
        self.drop = nn.Dropout(attention_dropout)

    @torch.autocast("cpu", enabled=False)
    @torch.autocast("cuda", enabled=False)
    def forward(
        self,
        q: torch.FloatTensor,
        kv: torch.FloatTensor,
        causal: Optional[bool] = None,
        key_padding_mask: Optional[torch.BoolTensor] = None,
        **kwargs,
    ) -> torch.FloatTensor:
        batch_size, seqlen_q = q.shape[0], q.shape[1]
        seqlen_k = kv.shape[1]

        if kv.shape[3] != q.shape[2]:
            kv = repeat(kv, "... hkv d -> ... (hkv g) d", g=q.shape[2] // kv.shape[3])
        k, v = kv.unbind(dim=2)

        q = q.to(torch.float32)
        k = k.to(torch.float32)

        causal = self.causal if causal is None else causal
        softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])

        # Autocast is manually disabled to avoid `torch.einsum` performing the operation
        # using float16, which might lead to overflow
        scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)

        if key_padding_mask is not None:
            padding_mask = torch.full(
                (batch_size, seqlen_k),
                -10000.0,
                dtype=scores.dtype,
                device=scores.device,
            )
            padding_mask.masked_fill_(key_padding_mask, 0.0)

            scores = scores + rearrange(padding_mask, "b s -> b 1 1 s")

        if causal:
            rows = rearrange(torch.arange(seqlen_q, device=q.device, dtype=torch.long), "s -> s 1")
            cols = torch.arange(seqlen_k, device=k.device, dtype=torch.long)
            causal_mask = cols > rows + seqlen_k - seqlen_q

            scores = scores.masked_fill(causal_mask, -10000.0)

        attention = torch.softmax(scores, dim=-1).to(v.dtype)
        attention = self.drop(attention)

        output = torch.einsum("bhts,bshd->bthd", attention, v)

        return output

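# When n_head_kv < n_head (MQA/GQA), the `repeat` above broadcasts each KV head
# across its group of query heads. A small shape sketch with assumed values,
# for illustration only:
#
#     q:  (b, s, 32, 64)    kv: (b, s, 2, 8, 64)   ->  g = 32 // 8 = 4
#     kv: (b, s, 2, 32, 64) after repeat, so q and k line up head-for-head
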
def _find_mha_dims(
    config: PretrainedConfig,
    n_head: Optional[int] = None,
    n_head_kv: Optional[int] = None,
    head_dim: Optional[int] = None,
) -> Tuple[int, int, int]:
    if n_head is None and head_dim is None:
        head_dim = config.n_embd // config.n_head
        n_head = config.n_head
    elif n_head is None or head_dim is None:
        raise ValueError("`n_head` and `head_dim` must both be specified or both be `None`.")

    if n_head_kv is None:
        n_head_kv = getattr(config, "n_head_kv", None) or n_head

    return n_head, n_head_kv, head_dim

def _update_kv_cache(kv: torch.FloatTensor, inference_params: InferenceParams, layer_idx: int) -> torch.FloatTensor:
    num_heads, head_dim = kv.shape[-2:]

    if layer_idx not in inference_params.key_value_memory_dict:
        kv_cache = torch.empty(
            inference_params.max_batch_size,
            inference_params.max_seqlen,
            2,
            num_heads,
            head_dim,
            dtype=kv.dtype,
            device=kv.device,
        )
        inference_params.key_value_memory_dict[layer_idx] = kv_cache
    else:
        kv_cache = inference_params.key_value_memory_dict[layer_idx]

    batch_start = inference_params.batch_size_offset
    batch_end = batch_start + kv.shape[0]

    sequence_start = inference_params.seqlen_offset
    sequence_end = sequence_start + kv.shape[1]

    kv_cache[batch_start:batch_end, sequence_start:sequence_end, ...] = kv
    kv = kv_cache[batch_start:batch_end, :sequence_end, ...]

    return kv

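# The per-layer cache is a pre-allocated tensor of shape
# (max_batch_size, max_seqlen, 2, n_head_kv, head_dim); each decoding step
# writes the new token's K/V at `seqlen_offset` and returns the filled prefix,
# so attention always sees every token cached so far.
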
class MHA(nn.Module):
    """Multi-head attention layer."""

    def __init__(
        self,
        config: PretrainedConfig,
        dtype: Optional[torch.dtype] = None,
        device: Optional[str] = None,
        rotary_dim: Optional[int] = None,
        rotary_base: float = 10000.0,
        rotary_scale_base: Optional[float] = None,
        n_head: Optional[int] = None,
        n_head_kv: Optional[int] = None,
        head_dim: Optional[int] = None,
        bias: bool = True,
        causal: bool = True,
        softmax_scale: Optional[float] = None,
        layer_idx: Optional[int] = None,
        return_residual: bool = False,
        checkpointing: bool = False,
    ) -> None:
        super().__init__()

        # Rotary embedding
        self.rotary_dim = rotary_dim if rotary_dim is not None else getattr(config, "rotary_dim", 0)
        if self.rotary_dim > 0:
            rotary_cls = FlashRotaryEmbedding if config.flash_rotary else RotaryEmbedding
            if rotary_cls is None:
                rotary_cls = RotaryEmbedding

            rotary_kwargs = {}
            if rotary_cls is RotaryEmbedding:
                rotary_kwargs["max_position_embeddings"] = config.n_positions

            self.rotary_emb = rotary_cls(
                self.rotary_dim,
                base=rotary_base,
                scale_base=rotary_scale_base,
                device=device,
                **rotary_kwargs,
            )

        # Projections
        self.n_head, self.n_head_kv, self.head_dim = _find_mha_dims(
            config, n_head=n_head, n_head_kv=n_head_kv, head_dim=head_dim
        )
        op_size = self.head_dim * (self.n_head + 2 * self.n_head_kv)
        hidden_size = config.n_embd

        linear_cls = FusedDense if config.fused_dense else nn.Linear
        if linear_cls is None:
            linear_cls = nn.Linear

        self.Wqkv = linear_cls(hidden_size, op_size, bias=bias, device=device, dtype=dtype)
        self.out_proj = linear_cls(hidden_size, hidden_size, bias=bias, device=device, dtype=dtype)

        # Attention
        attn_cls = FlashSelfAttention if config.flash_attn else SelfAttention
        if attn_cls is None:
            attn_cls = SelfAttention

        cross_attn_cls = FlashCrossAttention if config.flash_attn else CrossAttention
        if cross_attn_cls is None:
            cross_attn_cls = CrossAttention

        self.inner_attn = attn_cls(
            causal=causal,
            softmax_scale=softmax_scale,
            attention_dropout=config.attn_pdrop,
        )
        self.inner_cross_attn = cross_attn_cls(
            causal=causal,
            softmax_scale=softmax_scale,
            attention_dropout=config.attn_pdrop,
        )

        self.flash_attn = config.flash_attn and attn_cls is FlashSelfAttention
        self.layer_idx = layer_idx
        self.return_residual = return_residual
        self.checkpointing = checkpointing

    def _forward_self_attn(
        self, x: torch.FloatTensor, key_padding_mask: Optional[torch.BoolTensor]
    ) -> torch.FloatTensor:
        qkv = self.Wqkv(x)
        qkv = rearrange(qkv, "... (three h d) -> ... three h d", three=3, d=self.head_dim)

        if self.rotary_dim > 0:
            qkv = self.rotary_emb(qkv)

        if self.flash_attn:
            batch_size, seqlen = qkv.shape[0], qkv.shape[1]

            cu_seqlens, max_seqlen = None, None
            if key_padding_mask is not None:
                # If `key_padding_mask` is supplied, we need to unpad the input and retrieve
                # the `cu_seqlens` and `max_seqlen` to be used by `flash-attn`
                qkv, indices, cu_seqlens, max_seqlen = unpad_input(qkv, key_padding_mask)

            if self.checkpointing:
                attn_output = torch.utils.checkpoint.checkpoint(
                    self.inner_attn, qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen
                )
            else:
                attn_output = self.inner_attn(qkv, cu_seqlens=cu_seqlens, max_seqlen=max_seqlen).to(qkv.device)

            # If `key_padding_mask` is supplied, we need to pad the output back to the original shape
            return pad_input(attn_output, indices, batch_size, seqlen) if key_padding_mask is not None else attn_output

        if self.checkpointing:
            return torch.utils.checkpoint.checkpoint(self.inner_attn, qkv, key_padding_mask=key_padding_mask)

        return self.inner_attn(qkv, key_padding_mask=key_padding_mask)

    def _forward_cross_attn(
        self,
        x: torch.FloatTensor,
        past_key_values: Optional[InferenceParams],
        key_padding_mask: Optional[torch.BoolTensor],
    ) -> torch.FloatTensor:
        batch_size = x.shape[0]

        qkv = self.Wqkv(x)

        q = qkv[..., : self.n_head * self.head_dim]
        q = rearrange(q, "... (h d) -> ... h d", d=self.head_dim)

        kv = qkv[..., self.n_head * self.head_dim :]
        kv = rearrange(kv, "... (two hkv d) -> ... two hkv d", two=2, d=self.head_dim)

        seqlen_offset = past_key_values.seqlen_offset if past_key_values is not None else 0
        causal = None if seqlen_offset == 0 else False
        if self.rotary_dim > 0:
            q, kv = self.rotary_emb(q, kv=kv, seqlen_offset=seqlen_offset)

        if past_key_values is not None:
            kv = _update_kv_cache(kv, past_key_values, self.layer_idx)

        if self.flash_attn:
            batch_size, seqlen_q = q.shape[0], q.shape[1]
            seqlen_k = kv.shape[1]

            cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k = (
                None,
                None,
                None,
                None,
            )
            if key_padding_mask is not None:
                kv, _, cu_seqlens_k, max_seqlen_k = unpad_input(kv, key_padding_mask)

                if seqlen_q == 1:
                    key_padding_mask = torch.ones(batch_size, 1, device=q.device)
                elif seqlen_q != seqlen_k:
                    key_padding_mask = key_padding_mask[:, -seqlen_q:]

                q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(q, key_padding_mask)

            if self.checkpointing:
                attn_output = torch.utils.checkpoint.checkpoint(
                    self.inner_cross_attn,
                    q,
                    kv,
                    causal=causal,
                    cu_seqlens=cu_seqlens_q,
                    max_seqlen=max_seqlen_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_k=max_seqlen_k,
                )
            else:
                attn_output = self.inner_cross_attn(
                    q,
                    kv,
                    causal=causal,
                    cu_seqlens=cu_seqlens_q,
                    max_seqlen=max_seqlen_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_k=max_seqlen_k,
                )

            return (
                pad_input(attn_output, indices_q, batch_size, max_seqlen_q)
                if key_padding_mask is not None
                else attn_output
            )

        if self.checkpointing:
            return torch.utils.checkpoint.checkpoint(
                self.inner_cross_attn,
                q,
                kv,
                key_padding_mask=key_padding_mask,
                causal=causal,
            )

        return self.inner_cross_attn(q, kv, key_padding_mask=key_padding_mask, causal=causal)

    def forward(
        self,
        x: torch.FloatTensor,
        past_key_values: Optional[InferenceParams] = None,
        attention_mask: Optional[Union[torch.LongTensor, torch.BoolTensor]] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
        # TODO: Need an alternative way for dynamic control flow: torch.any(~attention_mask.bool())
        if attention_mask is not None:
            attention_mask = attention_mask.bool()

        # MHA
        if self.n_head == self.n_head_kv:
            if past_key_values is None:
                # If `past_key_values` are not supplied, we run self-attention
                attn_output = self._forward_self_attn(x, attention_mask)
            else:
                # If `past_key_values` are supplied, it means that we might have cached values and
                # could take advantage of cross-attention
                attn_output = self._forward_cross_attn(x, past_key_values, attention_mask)
        # MQA / GQA
        else:
            # Regardless of whether `past_key_values` are supplied, cross-attention is always used
            # because `q` and `kv` lengths might be different
            attn_output = self._forward_cross_attn(x, past_key_values, attention_mask)

        output = rearrange(attn_output, "... h d -> ... (h d)")
        output = self.out_proj(output)

        return output if not self.return_residual else (output, x)

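# Worked projection sizes for this checkpoint (n_embd=2048, n_head=32,
# head_dim = 2048 // 32 = 64, n_head_kv defaulting to n_head since the config
# sets it to null):
#
#     op_size = 64 * (32 + 2 * 32) = 6144
#     Wqkv:     2048 -> 6144   (packed q, k, v)
#     out_proj: 2048 -> 2048
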
class ParallelBlock(nn.Module):
    """Parallel block.

    This block applies parallel mixer and MLP layers to the input (used in GPT-J and CodeGen).

    """

    def __init__(
        self,
        config: PretrainedConfig,
        block_idx: Optional[int] = None,
    ) -> None:
        super().__init__()

        self.ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.block_idx = block_idx

        self.mixer = MHA(config, layer_idx=block_idx)
        self.mlp = MLP(config)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        **kwargs,
    ) -> torch.FloatTensor:
        residual = hidden_states
        hidden_states = self.ln(hidden_states)

        attn_outputs = self.mixer(
            hidden_states,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
        )
        if isinstance(attn_outputs, tuple):
            attn_outputs = attn_outputs[0]

        attn_outputs = self.resid_dropout(attn_outputs)
        feed_forward_hidden_states = self.resid_dropout(self.mlp(hidden_states))

        hidden_states = attn_outputs + feed_forward_hidden_states + residual

        return hidden_states

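# In equation form (ignoring dropout), a single shared LayerNorm feeds both
# branches, which are summed with the residual rather than applied in sequence:
#
#     out = x + Attn(LN(x)) + MLP(LN(x))
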
class CausalLMHead(nn.Module):
    """Causal Language Modeling head.

    Reference:
        Improving Language Understanding by Generative Pre-Training.
        https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf.

    """

    def __init__(self, config: PretrainedConfig) -> None:
        super().__init__()

        self.ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.linear = nn.Linear(config.n_embd, config.vocab_size)

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states = self.ln(hidden_states)
        # Logits are cast to fp32 so that softmax/loss stay numerically stable
        # even when the checkpoint itself is stored in float16
        logits = self.linear(hidden_states).to(torch.float32)

        return logits

class CausalLMLoss(nn.Module):
    """Causal Language Modeling loss.

    Reference:
        Improving Language Understanding by Generative Pre-Training.
        https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf.

    """

    def __init__(self, shift_labels: bool = True) -> None:
        super().__init__()

        self.shift_labels = shift_labels
        self.loss_fct = nn.CrossEntropyLoss()

    def forward(self, logits: torch.FloatTensor, labels: torch.LongTensor) -> torch.FloatTensor:
        if self.shift_labels:
            logits = logits[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()

        loss = self.loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))

        return loss

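# The shift aligns each position's logits with the *next* token, the standard
# next-token objective. A toy sketch with assumed tokens:
#
#     tokens: [t0, t1, t2, t3]
#     logits at [t0, t1, t2] are scored against labels [t1, t2, t3]
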
class PhiPreTrainedModel(PreTrainedModel):
    """Phi pre-trained model."""

    config_class = PhiConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = False
    _no_split_modules = ["ParallelBlock"]

    def __init__(self, *inputs, **kwargs) -> None:
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module: nn.Module) -> None:
        if isinstance(module, (nn.Linear,)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            if module.bias is not None:
                module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
        attention_mask: Optional[Union[torch.LongTensor, torch.BoolTensor]] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        if not isinstance(past_key_values, InferenceParams):
            past_key_values = InferenceParams(
                max_seqlen=self.config.n_positions,
                max_batch_size=input_ids.shape[0],
                seqlen_offset=0,
                batch_size_offset=0,
                key_value_memory_dict={},
                lengths_per_sample=None,
            )
        else:
            # Assume that `past_key_values` has cached all tokens up to the last token in `input_ids`
            past_key_values.seqlen_offset = input_ids.shape[1] - 1
            input_ids = input_ids[:, -1].unsqueeze(-1)

        return {
            "input_ids": input_ids,
            "past_key_values": past_key_values,
            "attention_mask": attention_mask,
        }

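# Generation flow sketch: on the first call, `past_key_values` is replaced by a
# fresh `InferenceParams` sized to n_positions; on every later call only the
# newest token is fed, with `seqlen_offset` pointing at its position so the
# KV cache and the rotary tables line up.
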
class PhiModel(PhiPreTrainedModel):
    """Phi model."""

    _keys_to_ignore_on_load_missing = [""]
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.mlp.(fc_in|fc_out)\.(weight|bias)"]

    def __init__(self, config: PhiConfig) -> None:
        super().__init__(config)

        self.embd = Embedding(config)
        self.h = nn.ModuleList([ParallelBlock(config, block_idx=i) for i in range(config.n_layer)])
        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self) -> nn.Embedding:
        return self.embd.wte

    def set_input_embeddings(self, new_embeddings: nn.Embedding) -> None:
        self.embd.wte = new_embeddings

    def forward(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
    ) -> torch.FloatTensor:
        hidden_states = self.embd(input_ids)

        for layer in self.h:
            hidden_states = layer(
                hidden_states,
                past_key_values=past_key_values,
                attention_mask=attention_mask,
            )

        return hidden_states

class PhiForCausalLM(PhiPreTrainedModel):
    """Phi for Causal Language Modeling."""

    _keys_to_ignore_on_load_missing = [""]
    _keys_to_ignore_on_load_unexpected = [r"transformer\.h\.\d+\.mlp.(fc_in|fc_out)\.(weight|bias)"]

    def __init__(self, config: PhiConfig) -> None:
        super().__init__(config)

        self.transformer = PhiModel(config)
        self.lm_head = CausalLMHead(config)
        self.loss = CausalLMLoss()

        self.post_init()

    def get_output_embeddings(self) -> nn.Linear:
        return self.lm_head.linear

    def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
        self.lm_head.linear = new_embeddings

    def forward(
        self,
        input_ids: torch.LongTensor,
        past_key_values: Optional[Union[torch.FloatTensor, InferenceParams]] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        hidden_states = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask)
        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            loss = self.loss(lm_logits, labels)

        return CausalLMOutputWithPast(loss=loss, logits=lm_logits, past_key_values=past_key_values)
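
# A minimal end-to-end usage sketch (assumes the standard `transformers`
# remote-code loading path and the upstream repo id; illustrative only, not
# part of this file):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained("microsoft/phi-1_5", trust_remote_code=True)
#     model = AutoModelForCausalLM.from_pretrained("microsoft/phi-1_5", trust_remote_code=True)
#
#     inputs = tok("def fibonacci(n):", return_tensors="pt")
#     out = model.generate(**inputs, max_new_tokens=64)
#     print(tok.decode(out[0]))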