kaizen9 committed
Commit
8c6dd43
1 Parent(s): 5046ddf

Update attention.py

Files changed (1)
  1. attention.py +91 -87
attention.py CHANGED
@@ -1,16 +1,16 @@
 """Attention layers."""
 import math
 import warnings
-from typing import List, Optional, Tuple
+from typing import Any, Optional
 import torch
 import torch.nn as nn
+import transformers
 from einops import rearrange
 from packaging import version
 from torch import nn
 from .fc import FC_CLASS_REGISTRY
 from .norm import NORM_CLASS_REGISTRY
 
-
 def is_flash_v2_installed(v2_version: str='2.0.0'):
     assert version.parse(v2_version) >= version.parse('2.0.0')
     try:
@@ -26,6 +26,15 @@ def is_flash_v1_installed():
         return False
     return version.parse(flash_attn.__version__) < version.parse('2.0.0')
 
+def is_transformers_version_gte(hf_version: str) -> bool:
+    return version.parse(transformers.__version__) >= version.parse(hf_version)
+
+def check_alibi_support(attention_impl: str) -> bool:
+    return attention_impl != 'flash' or is_flash_v2_installed(v2_version='v2.4.2')
+if is_flash_v1_installed():
+    import transformers
+    transformers.utils.is_flash_attn_available = lambda : False
+from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
 
 def _reset_is_causal(num_query_tokens: int, num_key_tokens: int, original_is_causal: bool) -> bool:
     if original_is_causal and num_query_tokens != num_key_tokens:
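
The new helpers gate optional features on installed package versions: check_alibi_support() only returns False when the attention implementation is 'flash' and flash-attn is older than 2.4.2, and is_transformers_version_gte() guards the newer Hugging Face rotary-embedding call (the new forward() below only passes unsqueeze_dim=2 to apply_rotary_pos_emb when this check passes). A minimal usage sketch, with made-up config values and assuming the helpers above are importable from this attention module:

    # Hypothetical config, only to show how the helpers above would be consulted.
    attn_impl = 'flash'
    use_alibi = True

    # ALiBi always works for the torch/triton paths; the flash path needs flash-attn >= 2.4.2.
    if use_alibi and not check_alibi_support(attn_impl):
        raise ValueError('alibi requires flash-attn>=2.4.2 when attn_impl=flash')

    # Decides which apply_rotary_pos_emb calling convention forward() uses.
    uses_new_rope_signature = is_transformers_version_gte('4.36')
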
@@ -35,13 +44,19 @@ def _reset_is_causal(num_query_tokens: int, num_key_tokens: int, original_is_cau
         return False
     return original_is_causal
 
-def scaled_multihead_dot_product_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n_heads: int, kv_n_heads: Optional[int]=None, past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, softmax_scale: Optional[float]=None, attn_bias: Optional[torch.Tensor]=None, key_padding_mask: Optional[torch.Tensor]=None, is_causal: bool=False, dropout_p: float=0.0, training: bool=False, needs_weights: bool=False, multiquery: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor, torch.Tensor]]]:
-    if multiquery:
-        warnings.warn(DeprecationWarning('The direct use of the multiquery arg is deprecated. Setting kv_n_heads=1 automatically. Please set kv_n_heads=1 explicitly to remove this warning.'))
-        kv_n_heads = 1
-    elif kv_n_heads is None:
-        warnings.warn(DeprecationWarning('Not specifying a value for the kv_n_heads arg is deprecated. Setting kv_n_heads=n_heads automatically. Please set kv_n_heads=n_heads explicitly to remove this warning.'))
-        kv_n_heads = n_heads
+def repeat_kv_for_gqa(hidden: torch.Tensor, n_rep: int) -> torch.Tensor:
+    """Perform repeat of kv heads along a particular dimension.
+    hidden.shape expected to be: (batch size, seq len, kv_n_heads, head_dim)
+    n_rep: amount of repetitions of kv_n_heads
+    Unlike torch.repeat_interleave, this function avoids allocating new memory.
+    """
+    if n_rep == 1:
+        return hidden
+    (b, s, kv_n_heads, d) = hidden.shape
+    hidden = hidden[:, :, :, None, :].expand(b, s, kv_n_heads, n_rep, d)
+    return hidden.reshape(b, s, kv_n_heads * n_rep, d)
+
+def scaled_multihead_dot_product_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n_heads: int, kv_n_heads: int, past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]]=None, softmax_scale: Optional[float]=None, attn_bias: Optional[torch.Tensor]=None, key_padding_mask: Optional[torch.Tensor]=None, is_causal: bool=False, dropout_p: float=0.0, training: bool=False, needs_weights: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor, torch.Tensor]]]:
     q = rearrange(query, 'b s (h d) -> b h s d', h=n_heads)
     k = rearrange(key, 'b s (h d) -> b h d s', h=kv_n_heads)
     v = rearrange(value, 'b s (h d) -> b h s d', h=kv_n_heads)
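
repeat_kv_for_gqa replaces torch.repeat_interleave with an expand-plus-reshape, so the kv heads are broadcast rather than copied until the final reshape. A quick equivalence check on toy shapes (all sizes below are made up):

    import torch

    # Toy GQA setup: 2 kv heads, each shared by 3 query heads.
    b, s, kv_n_heads, head_dim, n_rep = 2, 5, 2, 4, 3
    hidden = torch.randn(b, s, kv_n_heads, head_dim)

    # The expand-based repetition used by repeat_kv_for_gqa above...
    expanded = hidden[:, :, :, None, :].expand(b, s, kv_n_heads, n_rep, head_dim)
    repeated = expanded.reshape(b, s, kv_n_heads * n_rep, head_dim)

    # ...matches repeat_interleave along the head dimension, value for value.
    assert torch.equal(repeated, hidden.repeat_interleave(n_rep, dim=2))
    print(repeated.shape)  # torch.Size([2, 5, 6, 4])
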
@@ -53,8 +68,8 @@ def scaled_multihead_dot_product_attention(query: torch.Tensor, key: torch.Tenso
     (b, _, s_q, d) = q.shape
     s_k = k.size(-1)
     if kv_n_heads > 1 and kv_n_heads < n_heads:
-        k = k.repeat_interleave(n_heads // kv_n_heads, dim=1)
-        v = v.repeat_interleave(n_heads // kv_n_heads, dim=1)
+        k = repeat_kv_for_gqa(k.transpose(1, 2), n_heads // kv_n_heads).transpose(1, 2)
+        v = repeat_kv_for_gqa(v.transpose(1, 2), n_heads // kv_n_heads).transpose(1, 2)
     if softmax_scale is None:
         softmax_scale = 1 / math.sqrt(d)
     attn_weight = q.matmul(k) * softmax_scale
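
In the torch path the key tensor sits in (b, h, d, s) layout, so the helper is applied through a transpose on either side. A shape trace with made-up sizes, using repeat_kv_for_gqa as defined above:

    import math
    import torch
    from einops import rearrange

    b, s, n_heads, kv_n_heads, head_dim = 2, 6, 8, 2, 16
    query = torch.randn(b, s, n_heads * head_dim)
    key = torch.randn(b, s, kv_n_heads * head_dim)

    q = rearrange(query, 'b s (h d) -> b h s d', h=n_heads)     # (2, 8, 6, 16)
    k = rearrange(key, 'b s (h d) -> b h d s', h=kv_n_heads)    # (2, 2, 16, 6)

    # transpose(1, 2) moves the head axis to dim 2, where repeat_kv_for_gqa repeats it,
    # then the second transpose restores the (b, h, d, s) layout with 8 heads.
    k = repeat_kv_for_gqa(k.transpose(1, 2), n_heads // kv_n_heads).transpose(1, 2)  # (2, 8, 16, 6)

    attn_weight = q.matmul(k) * (1 / math.sqrt(head_dim))       # (2, 8, 6, 6) scores
    print(attn_weight.shape)
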
@@ -87,7 +102,7 @@ def scaled_multihead_dot_product_attention(query: torch.Tensor, key: torch.Tenso
         return (out, attn_weight, past_key_value)
     return (out, None, past_key_value)
 
-def check_valid_inputs(*tensors: torch.Tensor, valid_dtypes: Optional[List[torch.dtype]]=None):
+def check_valid_inputs(*tensors: torch.Tensor, valid_dtypes: Optional[list[torch.dtype]]=None):
     if valid_dtypes is None:
         valid_dtypes = [torch.float16, torch.bfloat16]
     for tensor in tensors:
@@ -95,52 +110,7 @@ def check_valid_inputs(*tensors: torch.Tensor, valid_dtypes: Optional[List[torch
             raise TypeError(f'tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}.')
         if not tensor.is_cuda:
             raise TypeError(f'Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r}).')
-'''
-def flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n_heads: int, kv_n_heads: Optional[int]=None, past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, softmax_scale: Optional[float]=None, attn_bias: Optional[torch.Tensor]=None, key_padding_mask: Optional[torch.Tensor]=None, is_causal: bool=False, dropout_p: float=0.0, training: bool=False, needs_weights: bool=False, multiquery: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor, torch.Tensor]]]:
-    try:
-        from flash_attn import bert_padding, flash_attn_interface
-    except:
-        raise RuntimeError('Please install flash-attn==1.0.3.post0')
-    check_valid_inputs(query, key, value)
-    if multiquery:
-        warnings.warn(DeprecationWarning('The direct use of the multiquery arg is deprecated. Setting kv_n_heads=1 automatically. Please set kv_n_heads=1 explicitly to remove this warning.'))
-        kv_n_heads = 1
-    elif kv_n_heads is None:
-        warnings.warn(DeprecationWarning('Not specifying a value for the kv_n_heads arg is deprecated. Setting kv_n_heads=n_heads automatically. Please set kv_n_heads=n_heads explicitly to remove this warning.'))
-        kv_n_heads = n_heads
-    if past_key_value is not None:
-        if len(past_key_value) != 0:
-            key = torch.cat([past_key_value[0], key], dim=1)
-            value = torch.cat([past_key_value[1], value], dim=1)
-        past_key_value = (key, value)
-    if attn_bias is not None:
-        _s_q = max(0, attn_bias.size(2) - query.size(1))
-        _s_k = max(0, attn_bias.size(3) - key.size(1))
-        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
-    if attn_bias is not None:
-        raise NotImplementedError(f'attn_bias not implemented for flash attn.')
-    (batch_size, seqlen) = query.shape[:2]
-    if key_padding_mask is None:
-        key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
-    query_padding_mask = key_padding_mask[:, -query.size(1):]
-    (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = bert_padding.unpad_input(query, query_padding_mask)
-    query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)
-    (key_unpad, _, cu_seqlens_k, max_seqlen_k) = bert_padding.unpad_input(key, key_padding_mask)
-    key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=kv_n_heads)
-    (value_unpad, _, _, _) = bert_padding.unpad_input(value, key_padding_mask)
-    value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=kv_n_heads)
-    if kv_n_heads == 1:
-        key_unpad = key_unpad.expand(key_unpad.size(0), n_heads, key_unpad.size(-1))
-        value_unpad = value_unpad.expand(value_unpad.size(0), n_heads, value_unpad.size(-1))
-    elif kv_n_heads < n_heads:
-        key_unpad = key_unpad.repeat_interleave(n_heads // kv_n_heads, dim=1)
-        value_unpad = value_unpad.repeat_interleave(n_heads // kv_n_heads, dim=1)
-    dropout_p = dropout_p if training else 0.0
-    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
-    output_unpad = flash_attn_interface.flash_attn_unpadded_func(query_unpad, key_unpad, value_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale=softmax_scale, causal=reset_is_causal, return_attn_probs=needs_weights)
-    output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)
-    return (output, None, past_key_value)
-'''
+
 def flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n_heads: int, kv_n_heads: int, past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]]=None, softmax_scale: Optional[float]=None, attn_bias: Optional[torch.Tensor]=None, key_padding_mask: Optional[torch.Tensor]=None, is_causal: bool=False, dropout_p: float=0.0, training: bool=False, needs_weights: bool=False, multiquery: bool=False, should_repeat_kv_for_gqa: Optional[bool]=True, sliding_window_size: int=-1, alibi_slopes: Optional[torch.Tensor]=None, flash_attn_padding_info: Optional[dict[str, torch.Tensor]]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor, torch.Tensor]]]:
     if key_padding_mask is not None:
         raise ValueError('key_padding_mask should be None for flash attn.')
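
flash_attn_fn now forwards sliding_window_size, alibi_slopes, and pre-computed flash_attn_padding_info to the flash-attn kernels instead of relying on key_padding_mask. As a rough mental model only (this is not what flash-attn executes internally), a sliding window of size w combined with causal masking restricts each query to itself and the previous w keys, and -1 disables the window:

    import torch

    def sliding_window_causal_mask(seq_len: int, window: int) -> torch.Tensor:
        # True = position may be attended to. window < 0 (e.g. the default -1)
        # leaves plain causal masking; otherwise query i only sees keys [i - window, i].
        q_idx = torch.arange(seq_len).unsqueeze(-1)
        k_idx = torch.arange(seq_len).unsqueeze(0)
        mask = k_idx <= q_idx
        if window >= 0:
            mask &= k_idx >= q_idx - window
        return mask

    print(sliding_window_causal_mask(5, window=2).int())
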
@@ -197,7 +167,8 @@ def flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n
         raise RuntimeError('flash-attn==1.0.9 or flash-attn==2.4.2 is required.')
     output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)
     return (output, None, past_key_value)
-def triton_flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n_heads: int, kv_n_heads: Optional[int]=None, past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, softmax_scale: Optional[float]=None, attn_bias: Optional[torch.Tensor]=None, key_padding_mask: Optional[torch.Tensor]=None, is_causal: bool=False, dropout_p: float=0.0, training: bool=False, needs_weights: bool=False, multiquery: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor, torch.Tensor]]]:
+
+def triton_flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n_heads: int, kv_n_heads: int, past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]]=None, softmax_scale: Optional[float]=None, attn_bias: Optional[torch.Tensor]=None, key_padding_mask: Optional[torch.Tensor]=None, is_causal: bool=False, dropout_p: float=0.0, training: bool=False, needs_weights: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor, torch.Tensor]]]:
     try:
         from .flash_attn_triton import flash_attn_func
     except:
@@ -211,12 +182,6 @@ def triton_flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Te
     if not _installed:
         raise RuntimeError('Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU ' + 'and `pip install .[gpu]` if installing from llm-foundry source or ' + '`pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` ' + 'if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). ' + 'Note: (1) requires you have CMake and PyTorch already installed.')
     check_valid_inputs(query, key, value)
-    if multiquery:
-        warnings.warn(DeprecationWarning('The direct use of the multiquery arg is deprecated. Setting kv_n_heads=1 automatically. Please set kv_n_heads=1 explicitly to remove this warning.'))
-        kv_n_heads = 1
-    elif kv_n_heads is None:
-        warnings.warn(DeprecationWarning('Not specifying a value for the kv_n_heads arg is deprecated. Setting kv_n_heads=n_heads automatically. Please set kv_n_heads=n_heads explicitly to remove this warning.'))
-        kv_n_heads = n_heads
     if past_key_value is not None:
         if len(past_key_value) != 0:
             key = torch.cat([past_key_value[0], key], dim=1)
@@ -244,8 +209,8 @@ def triton_flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Te
         key = key.repeat(1, 1, n_heads, 1)
         value = value.repeat(1, 1, n_heads, 1)
     elif kv_n_heads < n_heads:
-        key = key.repeat_interleave(n_heads // kv_n_heads, dim=2)
-        value = value.repeat_interleave(n_heads // kv_n_heads, dim=2)
+        key = repeat_kv_for_gqa(key, n_heads // kv_n_heads)
+        value = repeat_kv_for_gqa(value, n_heads // kv_n_heads)
     reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
     attn_output = flash_attn_func(query, key, value, attn_bias, reset_is_causal, softmax_scale)
     output = attn_output.view(*attn_output.shape[:2], -1)
@@ -253,22 +218,22 @@ def triton_flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Te
 
 class GroupedQueryAttention(nn.Module):
     """Grouped Query Attention (GQA) is a generalization of Multi-head (MHA).
-
     and Multi-query attention (MQA).
-
     This allows the user to set a variable of number of kv_n_heads, rather than
     just n_heads or 1, as in MHA and MQA. Using torch or triton attention
     implementation enables user to also use additive bias.
     """
 
-    def __init__(self, d_model: int, n_heads: int, kv_n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None):
+    def __init__(self, d_model: int, n_heads: int, kv_n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, qk_gn: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, bias: bool=True, sliding_window_size: int=-1):
         super().__init__()
         self.attn_impl = attn_impl
         self.clip_qkv = clip_qkv
         self.qk_ln = qk_ln
+        self.qk_gn = qk_gn
         self.d_model = d_model
         self.n_heads = n_heads
         self.kv_n_heads = kv_n_heads
+        self.sliding_window_size = sliding_window_size
         self.head_dim = d_model // n_heads
         if self.kv_n_heads <= 0:
             raise ValueError('kv_n_heads should be greater than zero.')
@@ -276,20 +241,25 @@ class GroupedQueryAttention(nn.Module):
             raise ValueError('The number of KV heads should be less than or equal to Q heads.')
         if self.n_heads % self.kv_n_heads != 0:
             raise ValueError('Each Q head should get the same number of KV heads, so n_heads must be divisible by kv_n_heads.')
+        if qk_ln and qk_gn:
+            raise ValueError('Only one of qk_ln and qk_gn can be set to True.')
         self.softmax_scale = softmax_scale
         if self.softmax_scale is None:
             self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads)
         self.attn_dropout_p = attn_pdrop
-        fc_kwargs = {}
+        fc_kwargs: dict[str, Any] = {'bias': bias}
         if fc_type != 'te':
             fc_kwargs['device'] = device
         self.Wqkv = FC_CLASS_REGISTRY[fc_type](self.d_model, self.d_model + 2 * self.kv_n_heads * self.head_dim, **fc_kwargs)
         fuse_splits = [i * self.head_dim for i in range(1, self.n_heads + 2 * self.kv_n_heads)]
         self.Wqkv._fused = (0, fuse_splits)
-        if self.qk_ln:
+        if self.qk_ln or self.qk_gn:
             norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
-            self.q_ln = norm_class(self.d_model, device=device)
-            self.k_ln = norm_class(self.kv_n_heads * self.head_dim, device=device)
+            norm_size = self.head_dim if qk_gn else d_model
+            self.q_ln = norm_class(norm_size, device=device)
+            if qk_ln:
+                norm_size = self.head_dim * kv_n_heads
+            self.k_ln = norm_class(norm_size, device=device)
         if self.attn_impl == 'flash':
             self.attn_fn = flash_attn_fn
         elif self.attn_impl == 'triton':
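
The constructor picks up qk_gn (per-head normalization of q/k sized head_dim instead of d_model), a bias flag threaded into every FC layer via fc_kwargs, and sliding_window_size. A hypothetical instantiation under made-up sizes; the import path in the comment is an assumption, not part of this diff:

    import torch
    # from llmfoundry.models.layers.attention import GroupedQueryAttention  # assumed location

    attn = GroupedQueryAttention(
        d_model=512,
        n_heads=8,
        kv_n_heads=2,            # 2 kv heads shared across 8 query heads
        attn_impl='torch',       # 'flash'/'triton' require their kernels to be installed
        qk_gn=True,              # normalize q/k per head; mutually exclusive with qk_ln
        bias=False,              # new flag: Wqkv and out_proj are created without biases
        sliding_window_size=-1,  # -1 leaves windowed attention disabled
    )

    x = torch.randn(1, 16, 512)
    out, attn_weights, past_kv = attn(x, is_causal=True)
    print(out.shape)  # torch.Size([1, 16, 512])
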
@@ -301,40 +271,72 @@ class GroupedQueryAttention(nn.Module):
         self.out_proj = FC_CLASS_REGISTRY[fc_type](self.d_model, self.d_model, **fc_kwargs)
         self.out_proj._is_residual = True
 
-    def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, is_causal: bool=True, needs_weights: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor, torch.Tensor]]]:
+    def forward(self, x: torch.Tensor, past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, rotary_emb_w_meta_info: Optional[dict]=None, is_causal: bool=True, needs_weights: bool=False, alibi_slopes: Optional[torch.Tensor]=None, flash_attn_padding_info: Optional[dict[str, torch.Tensor]]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor, torch.Tensor]]]:
         qkv = self.Wqkv(x)
         if self.clip_qkv:
             qkv = qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv)
         (query, key, value) = qkv.split([self.d_model, self.kv_n_heads * self.head_dim, self.kv_n_heads * self.head_dim], dim=2)
         key_padding_mask = attention_mask
-        if self.qk_ln:
+        if self.qk_ln or self.qk_gn:
+            (q_shape, k_shape) = (query.shape, key.shape)
+            if self.qk_gn:
+                (b, s) = query.shape[:2]
+                query = query.view(b, s, self.n_heads, -1)
+                key = key.view(b, s, self.kv_n_heads, -1)
             dtype = query.dtype
-            query = self.q_ln(query).to(dtype)
-            key = self.k_ln(key).to(dtype)
-        (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, self.kv_n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights)
+            query = self.q_ln(query).to(dtype).view(q_shape)
+            key = self.k_ln(key).to(dtype).view(k_shape)
+        if rotary_emb_w_meta_info is not None:
+            rotary_emb = rotary_emb_w_meta_info['rotary_emb']
+            seq_len = rotary_emb_w_meta_info['seq_len']
+            offset_info = rotary_emb_w_meta_info['offset_info']
+            (bsz, seqlen) = query.shape[:2]
+            query = query.view(bsz, seqlen, -1, self.head_dim)
+            key = key.view(bsz, seqlen, -1, self.head_dim)
+            if rotary_emb_w_meta_info['impl'] == 'dail':
+                value = value.view(bsz, seqlen, -1, self.head_dim)
+                kv = torch.stack([key, value], dim=2)
+                (query, kv) = rotary_emb(query, kv, seqlen_offset=offset_info, max_seqlen=seq_len)
+                [key, value] = torch.unbind(kv, dim=2)
+                value = value.view(bsz, seqlen, self.kv_n_heads * self.head_dim)
+            elif rotary_emb_w_meta_info['impl'] == 'hf':
+                (cos, sin) = rotary_emb(value, seq_len)
+                if is_transformers_version_gte('4.36'):
+                    (query, key) = apply_rotary_pos_emb(query, key, cos, sin, offset_info, unsqueeze_dim=2)
+                else:
+                    query = query.transpose(1, 2)
+                    key = key.transpose(1, 2)
+                    (query, key) = apply_rotary_pos_emb(query, key, cos, sin, offset_info)
+                    query = query.transpose(1, 2)
+                    key = key.transpose(1, 2)
+            query = query.view(bsz, seqlen, self.d_model)
+            key = key.view(bsz, seqlen, self.kv_n_heads * self.head_dim)
+        extra_attn_kwargs = {}
+        if self.attn_impl == 'flash':
+            key_padding_mask = None
+            extra_attn_kwargs = {'should_repeat_kv_for_gqa': not is_flash_v2_installed(), 'sliding_window_size': self.sliding_window_size, 'alibi_slopes': alibi_slopes, 'flash_attn_padding_info': flash_attn_padding_info}
+        (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, self.kv_n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights, **extra_attn_kwargs)
         return (self.out_proj(context), attn_weights, past_key_value)
 
 class MultiheadAttention(GroupedQueryAttention):
     """Multi-head self attention.
-
     Using torch or triton attention implementation enables user to also use
     additive bias.
     """
 
-    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None):
-        super().__init__(d_model=d_model, n_heads=n_heads, kv_n_heads=n_heads, attn_impl=attn_impl, clip_qkv=clip_qkv, qk_ln=qk_ln, softmax_scale=softmax_scale, attn_pdrop=attn_pdrop, norm_type=norm_type, fc_type=fc_type, device=device)
+    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, qk_gn: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, bias: bool=True, sliding_window_size: int=-1):
+        super().__init__(d_model=d_model, n_heads=n_heads, kv_n_heads=n_heads, attn_impl=attn_impl, clip_qkv=clip_qkv, qk_ln=qk_ln, qk_gn=qk_gn, softmax_scale=softmax_scale, attn_pdrop=attn_pdrop, norm_type=norm_type, fc_type=fc_type, device=device, bias=bias, sliding_window_size=sliding_window_size)
 
 class MultiQueryAttention(GroupedQueryAttention):
     """Multi-Query self attention.
-
     Using torch or triton attention implementation enables user to also use
     additive bias.
     """
 
-    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None):
-        super().__init__(d_model=d_model, n_heads=n_heads, kv_n_heads=1, attn_impl=attn_impl, clip_qkv=clip_qkv, qk_ln=qk_ln, softmax_scale=softmax_scale, attn_pdrop=attn_pdrop, norm_type=norm_type, fc_type=fc_type, device=device)
+    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, qk_gn: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, bias: bool=True, sliding_window_size: int=-1):
+        super().__init__(d_model=d_model, n_heads=n_heads, kv_n_heads=1, attn_impl=attn_impl, clip_qkv=clip_qkv, qk_ln=qk_ln, qk_gn=qk_gn, softmax_scale=softmax_scale, attn_pdrop=attn_pdrop, norm_type=norm_type, fc_type=fc_type, device=device, bias=bias, sliding_window_size=sliding_window_size)
 
-def attn_bias_shape(attn_impl: str, n_heads: int, seq_len: int, alibi: bool, prefix_lm: bool, causal: bool, use_sequence_id: bool) -> Optional[Tuple[int, int, int, int]]:
+def attn_bias_shape(attn_impl: str, n_heads: int, seq_len: int, alibi: bool, prefix_lm: bool, causal: bool, use_sequence_id: bool) -> Optional[tuple[int, int, int, int]]:
     if attn_impl == 'flash':
         return None
     elif attn_impl in ['torch', 'triton']:
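
forward() now optionally applies rotary position embeddings before dispatching: rotary_emb_w_meta_info carries the rotary module plus 'impl' ('dail' for the flash-attn rotary kernel, 'hf' for transformers' apply_rotary_pos_emb), 'seq_len', and 'offset_info'. The snippet below is a from-scratch illustration of the rotation RoPE applies to q/k, not the implementation either branch calls; the shapes and base constant are assumptions:

    import torch

    def rope_rotate(x: torch.Tensor, base: float = 10000.0) -> torch.Tensor:
        # x: (batch, seq, n_heads, head_dim), head_dim even.
        b, s, h, d = x.shape
        pos = torch.arange(s, dtype=torch.float32)
        inv_freq = 1.0 / base ** (torch.arange(0, d, 2, dtype=torch.float32) / d)
        angles = torch.outer(pos, inv_freq)                  # (seq, head_dim / 2)
        cos = angles.cos()[None, :, None, :]
        sin = angles.sin()[None, :, None, :]
        x1, x2 = x[..., 0::2], x[..., 1::2]
        # Rotate each channel pair by a position-dependent angle; because q and k are
        # rotated identically, their dot products depend only on relative position.
        return torch.cat([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1)

    q = torch.randn(1, 8, 4, 64)
    print(rope_rotate(q).shape)  # rotation preserves torch.Size([1, 8, 4, 64])
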
@@ -359,13 +361,15 @@ def build_attn_bias(attn_impl: str, attn_bias: torch.Tensor, n_heads: int, seq_l
     else:
         raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
 
-def gen_slopes(n_heads: int, alibi_bias_max: int=8, device: Optional[torch.device]=None) -> torch.Tensor:
+def gen_slopes(n_heads: int, alibi_bias_max: int=8, device: Optional[torch.device]=None, return_1d: bool=False) -> torch.Tensor:
    _n_heads = 2 ** math.ceil(math.log2(n_heads))
    m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device)
    m = m.mul(alibi_bias_max / _n_heads)
    slopes = 1.0 / torch.pow(2, m)
    if _n_heads != n_heads:
        slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
+    if return_1d:
+        return slopes
    return slopes.view(1, n_heads, 1, 1)
 
 def build_alibi_bias(n_heads: int, seq_len: int, full: bool=False, alibi_bias_max: int=8, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> torch.Tensor:
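
gen_slopes gains a return_1d flag: by default the slopes are shaped (1, n_heads, 1, 1) so they broadcast against (b, h, s_q, s_k) bias tensors in build_alibi_bias, while the flat per-head vector is handy for callers like the new alibi_slopes plumbing. For example (n_heads=8 is a made-up size, and gen_slopes is assumed to be imported from this module):

    import torch

    # For 8 heads the geometric ALiBi slopes are 1/2, 1/4, ..., 1/256.
    slopes_1d = gen_slopes(n_heads=8, alibi_bias_max=8, return_1d=True)
    print(slopes_1d)
    # tensor([0.5000, 0.2500, 0.1250, 0.0625, 0.0312, 0.0156, 0.0078, 0.0039])

    slopes_4d = gen_slopes(n_heads=8)
    print(slopes_4d.shape)  # torch.Size([1, 8, 1, 1]) -- broadcastable over attention scores
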
 