Upload 2 files
- adapt_tokenizer.py +40 -0
- attention.py +338 -0
adapt_tokenizer.py
ADDED
@@ -0,0 +1,40 @@
from typing import Any
from transformers import AutoTokenizer, PreTrainedTokenizerBase
NUM_SENTINEL_TOKENS: int = 100

def adapt_tokenizer_for_denoising(tokenizer: PreTrainedTokenizerBase) -> None:
    """Adds sentinel tokens and padding token (if missing).

    Expands the tokenizer vocabulary to include sentinel tokens
    used in mixture-of-denoiser tasks as well as a padding token.

    All added tokens are added as special tokens. No tokens are
    added if sentinel tokens and padding token already exist.
    """
    sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]
    tokenizer.add_tokens(sentinels_to_add, special_tokens=True)
    if tokenizer.pad_token is None:
        tokenizer.add_tokens('<pad>', special_tokens=True)
        tokenizer.pad_token = '<pad>'
        assert tokenizer.pad_token_id is not None
    sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])
    _sentinel_token_ids = tokenizer(sentinels, add_special_tokens=False).input_ids
    tokenizer.sentinel_token_ids = _sentinel_token_ids

class AutoTokenizerForMOD(AutoTokenizer):
    """AutoTokenizer + Adaptation for MOD.

    A simple wrapper around AutoTokenizer to make instantiating
    an MOD-adapted tokenizer a bit easier.

    MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>),
    a padding token, and a property to get the token ids of the
    sentinel tokens.
    """

    @classmethod
    def from_pretrained(cls, *args: Any, **kwargs: Any) -> PreTrainedTokenizerBase:
        """See `AutoTokenizer.from_pretrained` docstring."""
        tokenizer = super().from_pretrained(*args, **kwargs)
        adapt_tokenizer_for_denoising(tokenizer)
        return tokenizer
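For reference, a short usage sketch (not part of the uploaded file). The checkpoint name 'gpt2' is an arbitrary example; any Hugging Face tokenizer works the same way.

# Hypothetical usage sketch; 'gpt2' is an arbitrary example checkpoint.
tokenizer = AutoTokenizerForMOD.from_pretrained('gpt2')

print(tokenizer.pad_token)                # '<pad>' (added here because GPT-2 ships without one)
print(len(tokenizer.sentinel_token_ids))  # 100 ids, one per <extra_id_i> sentinel token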
attention.py
ADDED
@@ -0,0 +1,338 @@
"""Attention layers."""
import math
import warnings
from typing import Any, List, Optional, Tuple
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from .fc import FC_CLASS_REGISTRY
from .norm import NORM_CLASS_REGISTRY

def is_flash_v2_installed():
    try:
        import flash_attn as flash_attn
    except:
        return False
    return version.parse(flash_attn.__version__) >= version.parse('2.0.0')

def is_flash_v1_installed():
    try:
        import flash_attn as flash_attn
    except:
        return False
    return version.parse(flash_attn.__version__) < version.parse('2.0.0')

def _reset_is_causal(num_query_tokens: int, num_key_tokens: int, original_is_causal: bool) -> bool:
    if original_is_causal and num_query_tokens != num_key_tokens:
        if num_query_tokens != 1:
            raise NotImplementedError('MPT does not support query and key with different number of tokens, unless number of query tokens is 1.')
        else:
            return False
    return original_is_causal

def repeat_kv_for_gqa(hidden: torch.Tensor, n_rep: int) -> torch.Tensor:
    """Perform repeat of kv heads along a particular dimension.

    hidden.shape expected to be: (batch size, seq len, kv_n_heads, head_dim)
    n_rep: amount of repetitions of kv_n_heads
    Unlike torch.repeat_interleave, this function avoids allocating new memory.
    """
    if n_rep == 1:
        return hidden
    (b, s, kv_n_heads, d) = hidden.shape
    hidden = hidden[:, :, :, None, :].expand(b, s, kv_n_heads, n_rep, d)
    return hidden.reshape(b, s, kv_n_heads * n_rep, d)

def scaled_multihead_dot_product_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n_heads: int, kv_n_heads: Optional[int]=None, past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, softmax_scale: Optional[float]=None, attn_bias: Optional[torch.Tensor]=None, key_padding_mask: Optional[torch.Tensor]=None, is_causal: bool=False, dropout_p: float=0.0, training: bool=False, needs_weights: bool=False, multiquery: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor, torch.Tensor]]]:
    if multiquery:
        warnings.warn(DeprecationWarning('The direct use of the multiquery arg is deprecated. Setting kv_n_heads=1 automatically. Please set kv_n_heads=1 explicitly to remove this warning.'))
        kv_n_heads = 1
    elif kv_n_heads is None:
        warnings.warn(DeprecationWarning('Not specifying a value for the kv_n_heads arg is deprecated. Setting kv_n_heads=n_heads automatically. Please set kv_n_heads=n_heads explicitly to remove this warning.'))
        kv_n_heads = n_heads
    q = rearrange(query, 'b s (h d) -> b h s d', h=n_heads)
    k = rearrange(key, 'b s (h d) -> b h d s', h=kv_n_heads)
    v = rearrange(value, 'b s (h d) -> b h s d', h=kv_n_heads)
    if past_key_value is not None:
        if len(past_key_value) != 0:
            k = torch.cat([past_key_value[0], k], dim=3)
            v = torch.cat([past_key_value[1], v], dim=2)
        past_key_value = (k, v)
    (b, _, s_q, d) = q.shape
    s_k = k.size(-1)
    if kv_n_heads > 1 and kv_n_heads < n_heads:
        k = repeat_kv_for_gqa(k.transpose(1, 2), n_heads // kv_n_heads).transpose(1, 2)
        v = repeat_kv_for_gqa(v.transpose(1, 2), n_heads // kv_n_heads).transpose(1, 2)
    if softmax_scale is None:
        softmax_scale = 1 / math.sqrt(d)
    attn_weight = q.matmul(k) * softmax_scale
    if attn_bias is not None:
        _s_q = max(0, attn_bias.size(2) - s_q)
        _s_k = max(0, attn_bias.size(3) - s_k)
        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
        if attn_bias.size(-1) != 1 and attn_bias.size(-1) != s_k or (attn_bias.size(-2) != 1 and attn_bias.size(-2) != s_q):
            raise RuntimeError(f'attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}.')
        attn_weight = attn_weight + attn_bias
    min_val = torch.finfo(q.dtype).min
    if key_padding_mask is not None:
        if attn_bias is not None:
            warnings.warn('Propagating key_padding_mask to the attention module ' + 'and applying it within the attention module can cause ' + 'unnecessary computation/memory usage. Consider integrating ' + 'into attn_bias once and passing that to each attention ' + 'module instead.')
        attn_weight = attn_weight.masked_fill(~key_padding_mask.view((b, 1, 1, s_k)), min_val)
    if is_causal and (not q.size(2) == 1):
        s = max(s_q, s_k)
        causal_mask = attn_weight.new_ones(s, s, dtype=torch.float32)
        causal_mask = causal_mask.tril()
        causal_mask = causal_mask.to(torch.bool)
        causal_mask = ~causal_mask
        causal_mask = causal_mask[-s_q:, -s_k:]
        attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k), min_val)
    attn_weight = torch.softmax(attn_weight, dim=-1)
    if dropout_p:
        attn_weight = torch.nn.functional.dropout(attn_weight, p=dropout_p, training=training, inplace=True)
    out = attn_weight.to(v.dtype).matmul(v)
    out = rearrange(out, 'b h s d -> b s (h d)')
    if needs_weights:
        return (out, attn_weight, past_key_value)
    return (out, None, past_key_value)

def check_valid_inputs(*tensors: torch.Tensor, valid_dtypes: Optional[List[torch.dtype]]=None):
    if valid_dtypes is None:
        valid_dtypes = [torch.float16, torch.bfloat16]
    for tensor in tensors:
        if tensor.dtype not in valid_dtypes:
            raise TypeError(f'tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}.')
        if not tensor.is_cuda:
            raise TypeError(f'Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r}).')

def flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n_heads: int, kv_n_heads: Optional[int]=None, past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, softmax_scale: Optional[float]=None, attn_bias: Optional[torch.Tensor]=None, key_padding_mask: Optional[torch.Tensor]=None, is_causal: bool=False, dropout_p: float=0.0, training: bool=False, needs_weights: bool=False, multiquery: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor, torch.Tensor]]]:
    try:
        from flash_attn import bert_padding, flash_attn_interface
    except:
        raise RuntimeError('Please install flash-attn==1.0.9 or flash-attn==2.3.2')
    check_valid_inputs(query, key, value)
    if multiquery:
        warnings.warn(DeprecationWarning('The direct use of the multiquery arg is deprecated. Setting kv_n_heads=1 automatically. Please set kv_n_heads=1 explicitly to remove this warning.'))
        kv_n_heads = 1
    elif kv_n_heads is None:
        warnings.warn(DeprecationWarning('Not specifying a value for the kv_n_heads arg is deprecated. Setting kv_n_heads=n_heads automatically. Please set kv_n_heads=n_heads explicitly to remove this warning.'))
        kv_n_heads = n_heads
    if past_key_value is not None:
        if len(past_key_value) != 0:
            key = torch.cat([past_key_value[0], key], dim=1)
            value = torch.cat([past_key_value[1], value], dim=1)
        past_key_value = (key, value)
    if attn_bias is not None:
        _s_q = max(0, attn_bias.size(2) - query.size(1))
        _s_k = max(0, attn_bias.size(3) - key.size(1))
        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
    if attn_bias is not None:
        raise NotImplementedError(f'attn_bias not implemented for flash attn.')
    (batch_size, seqlen) = query.shape[:2]
    if key_padding_mask is None:
        key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
    query_padding_mask = key_padding_mask[:, -query.size(1):]
    (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = bert_padding.unpad_input(query, query_padding_mask)
    query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)
    (key_unpad, _, cu_seqlens_k, max_seqlen_k) = bert_padding.unpad_input(key, key_padding_mask)
    key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=kv_n_heads)
    (value_unpad, _, _, _) = bert_padding.unpad_input(value, key_padding_mask)
    value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=kv_n_heads)
    if kv_n_heads == 1:
        key_unpad = key_unpad.expand(key_unpad.size(0), n_heads, key_unpad.size(-1))
        value_unpad = value_unpad.expand(value_unpad.size(0), n_heads, value_unpad.size(-1))
    elif kv_n_heads < n_heads:
        key_unpad = repeat_kv_for_gqa(key_unpad.view(batch_size, seqlen, kv_n_heads, -1), n_heads // kv_n_heads).view(batch_size * seqlen, n_heads, -1)
        value_unpad = repeat_kv_for_gqa(value_unpad.view(batch_size, seqlen, kv_n_heads, -1), n_heads // kv_n_heads).view(batch_size * seqlen, n_heads, -1)
    dropout_p = dropout_p if training else 0.0
    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
    if is_flash_v1_installed():
        output_unpad = flash_attn_interface.flash_attn_unpadded_func(q=query_unpad, k=key_unpad, v=value_unpad, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_q, max_seqlen_k=max_seqlen_k, dropout_p=dropout_p, softmax_scale=softmax_scale, causal=reset_is_causal, return_attn_probs=needs_weights)
    elif is_flash_v2_installed():
        output_unpad = flash_attn_interface.flash_attn_varlen_func(q=query_unpad, k=key_unpad, v=value_unpad, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_q, max_seqlen_k=max_seqlen_k, dropout_p=dropout_p, softmax_scale=softmax_scale, causal=reset_is_causal, return_attn_probs=needs_weights)
    else:
        raise RuntimeError('flash-attn==1.0.9 or flash-attn==2.3.2 is required.')
    output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)
    return (output, None, past_key_value)

def triton_flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n_heads: int, kv_n_heads: Optional[int]=None, past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, softmax_scale: Optional[float]=None, attn_bias: Optional[torch.Tensor]=None, key_padding_mask: Optional[torch.Tensor]=None, is_causal: bool=False, dropout_p: float=0.0, training: bool=False, needs_weights: bool=False, multiquery: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor, torch.Tensor]]]:
    try:
        from .flash_attn_triton import flash_attn_func
    except:
        _installed = False
        if version.parse(torch.__version__) < version.parse('2.0.0'):
            _installed = True
            try:
                from flash_attn.flash_attn_triton import flash_attn_func
            except:
                _installed = False
        if not _installed:
            raise RuntimeError('Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU ' + 'and `pip install .[gpu]` if installing from llm-foundry source or ' + '`pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` ' + 'if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). ' + 'Note: (1) requires you have CMake and PyTorch already installed.')
    check_valid_inputs(query, key, value)
    if multiquery:
        warnings.warn(DeprecationWarning('The direct use of the multiquery arg is deprecated. Setting kv_n_heads=1 automatically. Please set kv_n_heads=1 explicitly to remove this warning.'))
        kv_n_heads = 1
    elif kv_n_heads is None:
        warnings.warn(DeprecationWarning('Not specifying a value for the kv_n_heads arg is deprecated. Setting kv_n_heads=n_heads automatically. Please set kv_n_heads=n_heads explicitly to remove this warning.'))
        kv_n_heads = n_heads
    if past_key_value is not None:
        if len(past_key_value) != 0:
            key = torch.cat([past_key_value[0], key], dim=1)
            value = torch.cat([past_key_value[1], value], dim=1)
        past_key_value = (key, value)
    if attn_bias is not None:
        _s_q = max(0, attn_bias.size(2) - query.size(1))
        _s_k = max(0, attn_bias.size(3) - key.size(1))
        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
    if dropout_p:
        raise NotImplementedError(f'Dropout not implemented for attn_impl: triton.')
    dropout_p = dropout_p if training else 0.0
    if needs_weights:
        raise NotImplementedError(f'attn_impl: triton cannot return attn weights.')
    if key_padding_mask is not None:
        warnings.warn('Propagating key_padding_mask to the attention module ' + 'and applying it within the attention module can cause ' + 'unnecessary computation/memory usage. Consider integrating ' + 'into attn_bias once and passing that to each attention ' + 'module instead.')
        (b_size, s_k) = key_padding_mask.shape[:2]
        if attn_bias is None:
            attn_bias = query.new_zeros(b_size, 1, 1, s_k)
        attn_bias = attn_bias.masked_fill(~key_padding_mask.view((b_size, 1, 1, s_k)), torch.finfo(query.dtype).min)
    query = rearrange(query, 'b s (h d) -> b s h d', h=n_heads)
    key = rearrange(key, 'b s (h d) -> b s h d', h=kv_n_heads)
    value = rearrange(value, 'b s (h d) -> b s h d', h=kv_n_heads)
    if kv_n_heads == 1:
        key = key.repeat(1, 1, n_heads, 1)
        value = value.repeat(1, 1, n_heads, 1)
    elif kv_n_heads < n_heads:
        key = repeat_kv_for_gqa(key, n_heads // kv_n_heads)
        value = repeat_kv_for_gqa(value, n_heads // kv_n_heads)
    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
    attn_output = flash_attn_func(query, key, value, attn_bias, reset_is_causal, softmax_scale)
    output = attn_output.view(*attn_output.shape[:2], -1)
    return (output, None, past_key_value)

class GroupedQueryAttention(nn.Module):
    """Grouped Query Attention (GQA) is a generalization of Multi-head (MHA)
    and Multi-query attention (MQA).

    This allows the user to set a variable number of kv_n_heads, rather than
    just n_heads or 1, as in MHA and MQA. Using the torch or triton attention
    implementation also enables the user to use an additive bias.
    """

    def __init__(self, d_model: int, n_heads: int, kv_n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, bias: bool=True):
        super().__init__()
        self.attn_impl = attn_impl
        self.clip_qkv = clip_qkv
        self.qk_ln = qk_ln
        self.d_model = d_model
        self.n_heads = n_heads
        self.kv_n_heads = kv_n_heads
        self.head_dim = d_model // n_heads
        if self.kv_n_heads <= 0:
            raise ValueError('kv_n_heads should be greater than zero.')
        if self.kv_n_heads > self.n_heads:
            raise ValueError('The number of KV heads should be less than or equal to Q heads.')
        if self.n_heads % self.kv_n_heads != 0:
            raise ValueError('Each Q head should get the same number of KV heads, so n_heads must be divisible by kv_n_heads.')
        self.softmax_scale = softmax_scale
        if self.softmax_scale is None:
            self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads)
        self.attn_dropout_p = attn_pdrop
        fc_kwargs: dict[str, Any] = {'bias': bias}
        if fc_type != 'te':
            fc_kwargs['device'] = device
        self.Wqkv = FC_CLASS_REGISTRY[fc_type](self.d_model, self.d_model + 2 * self.kv_n_heads * self.head_dim, **fc_kwargs)
        fuse_splits = [i * self.head_dim for i in range(1, self.n_heads + 2 * self.kv_n_heads)]
        self.Wqkv._fused = (0, fuse_splits)
        if self.qk_ln:
            norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
            self.q_ln = norm_class(self.d_model, device=device)
            self.k_ln = norm_class(self.kv_n_heads * self.head_dim, device=device)
        if self.attn_impl == 'flash':
            self.attn_fn = flash_attn_fn
        elif self.attn_impl == 'triton':
            self.attn_fn = triton_flash_attn_fn
        elif self.attn_impl == 'torch':
            self.attn_fn = scaled_multihead_dot_product_attention
        else:
            raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
        self.out_proj = FC_CLASS_REGISTRY[fc_type](self.d_model, self.d_model, **fc_kwargs)
        self.out_proj._is_residual = True

    def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, is_causal: bool=True, needs_weights: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor, torch.Tensor]]]:
        qkv = self.Wqkv(x)
        if self.clip_qkv:
            qkv = qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv)
        (query, key, value) = qkv.split([self.d_model, self.kv_n_heads * self.head_dim, self.kv_n_heads * self.head_dim], dim=2)
        key_padding_mask = attention_mask
        if self.qk_ln:
            dtype = query.dtype
            query = self.q_ln(query).to(dtype)
            key = self.k_ln(key).to(dtype)
        (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, self.kv_n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights)
        return (self.out_proj(context), attn_weights, past_key_value)

class MultiheadAttention(GroupedQueryAttention):
    """Multi-head self attention.

    Using the torch or triton attention implementation also enables the user
    to use an additive bias.
    """

    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, bias: bool=True):
        super().__init__(d_model=d_model, n_heads=n_heads, kv_n_heads=n_heads, attn_impl=attn_impl, clip_qkv=clip_qkv, qk_ln=qk_ln, softmax_scale=softmax_scale, attn_pdrop=attn_pdrop, norm_type=norm_type, fc_type=fc_type, device=device, bias=bias)

class MultiQueryAttention(GroupedQueryAttention):
    """Multi-Query self attention.

    Using the torch or triton attention implementation also enables the user
    to use an additive bias.
    """

    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, bias: bool=True):
        super().__init__(d_model=d_model, n_heads=n_heads, kv_n_heads=1, attn_impl=attn_impl, clip_qkv=clip_qkv, qk_ln=qk_ln, softmax_scale=softmax_scale, attn_pdrop=attn_pdrop, norm_type=norm_type, fc_type=fc_type, device=device, bias=bias)

def attn_bias_shape(attn_impl: str, n_heads: int, seq_len: int, alibi: bool, prefix_lm: bool, causal: bool, use_sequence_id: bool) -> Optional[Tuple[int, int, int, int]]:
    if attn_impl == 'flash':
        return None
    elif attn_impl in ['torch', 'triton']:
        if alibi:
            if (prefix_lm or not causal) or use_sequence_id:
                return (1, n_heads, seq_len, seq_len)
            return (1, n_heads, 1, seq_len)
        elif prefix_lm or use_sequence_id:
            return (1, 1, seq_len, seq_len)
        return None
    else:
        raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')

def build_attn_bias(attn_impl: str, attn_bias: torch.Tensor, n_heads: int, seq_len: int, causal: bool=False, alibi: bool=False, alibi_bias_max: int=8) -> Optional[torch.Tensor]:
    if attn_impl == 'flash':
        return None
    elif attn_impl in ['torch', 'triton']:
        if alibi:
            (device, dtype) = (attn_bias.device, attn_bias.dtype)
            attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))
        return attn_bias
    else:
        raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')

def gen_slopes(n_heads: int, alibi_bias_max: int=8, device: Optional[torch.device]=None) -> torch.Tensor:
    _n_heads = 2 ** math.ceil(math.log2(n_heads))
    m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device)
    m = m.mul(alibi_bias_max / _n_heads)
    slopes = 1.0 / torch.pow(2, m)
    if _n_heads != n_heads:
        slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
    return slopes.view(1, n_heads, 1, 1)

def build_alibi_bias(n_heads: int, seq_len: int, full: bool=False, alibi_bias_max: int=8, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> torch.Tensor:
    alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, 1, seq_len)
    if full:
        alibi_bias = alibi_bias - torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, seq_len, 1)
        alibi_bias = alibi_bias.abs().mul(-1)
    slopes = gen_slopes(n_heads, alibi_bias_max, device=device)
    alibi_bias = alibi_bias * slopes
    return alibi_bias.to(dtype=dtype)

ATTN_CLASS_REGISTRY = {'multihead_attention': MultiheadAttention, 'multiquery_attention': MultiQueryAttention, 'grouped_query_attention': GroupedQueryAttention}
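For reference, a short usage sketch (not part of the uploaded file) exercising the pure-torch attention path with an ALiBi bias. All shapes and hyperparameters are arbitrary examples, and it assumes FC_CLASS_REGISTRY['torch'] resolves to a standard nn.Linear-style layer, as the default fc_type='torch' implies.

# Hypothetical usage sketch; sizes are arbitrary examples.
import torch

n_heads, kv_n_heads, seq_len, d_model = 8, 2, 16, 64
attn = GroupedQueryAttention(d_model=d_model, n_heads=n_heads, kv_n_heads=kv_n_heads, attn_impl='torch')

# Build a broadcastable ALiBi bias once; for causal ALiBi its shape is (1, n_heads, 1, seq_len).
bias_shape = attn_bias_shape('torch', n_heads, seq_len, alibi=True, prefix_lm=False, causal=True, use_sequence_id=False)
attn_bias = build_attn_bias('torch', torch.zeros(bias_shape), n_heads, seq_len, causal=True, alibi=True)

x = torch.randn(2, seq_len, d_model)          # (batch, seq, d_model)
out, _, _ = attn(x, attn_bias=attn_bias, is_causal=True)
print(out.shape)                              # torch.Size([2, 16, 64])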