import math
from typing import Optional

import torch
import torch.nn.functional as F
from torch import nn


class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other. Originally ported from
    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66
    and adapted to the N-d case. Uses three separate linear layers to compute the query, key, and value projections.

    Parameters:
        channels (:obj:`int`): The number of channels in the input and output.
        num_head_channels (:obj:`int`, *optional*):
            The number of channels in each head. If `None`, then `num_heads` = 1.
        num_groups (:obj:`int`, *optional*, defaults to 32): The number of groups to use for group norm.
        rescale_output_factor (:obj:`float`, *optional*, defaults to 1.0): The factor to rescale the output by.
        eps (:obj:`float`, *optional*, defaults to 1e-5): The epsilon value to use for group norm.
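
    Example:
        A minimal usage sketch; the channel count, head size, and input shape below are illustrative
        assumptions, not requirements::

            attn = AttentionBlock(channels=64, num_head_channels=32)
            hidden_states = torch.randn(1, 64, 8, 8)  # (batch, channel, height, width)
            output = attn(hidden_states)  # same shape as the input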
""" |
|
|
|
def __init__( |
|
self, |
|
channels: int, |
|
num_head_channels: Optional[int] = None, |
|
num_groups: int = 32, |
|
rescale_output_factor = 1.0, |
|
eps = 1e-5, |
|
): |
|
super().__init__() |
|
self.channels = channels |
|
|
|
self.num_heads = channels // num_head_channels if num_head_channels is not None else 1 |
|
self.num_head_size = num_head_channels |
|
self.group_norm = nn.GroupNorm(num_channels=channels, num_groups=num_groups, eps=eps, affine=True) |
|
|
|
|
|
self.query = nn.Linear(channels, channels) |
|
self.key = nn.Linear(channels, channels) |
|
self.value = nn.Linear(channels, channels) |
|
|
|
self.rescale_output_factor = rescale_output_factor |
|
self.proj_attn = nn.Linear(channels, channels, 1) |
|
|
|
def transpose_for_scores(self, projection: torch.Tensor) -> torch.Tensor: |
|
new_projection_shape = projection.size()[:-1] + (self.num_heads, -1) |
|
|
|
new_projection = projection.view(new_projection_shape).permute(0, 2, 1, 3) |
|
return new_projection |
|
|
|
    def forward(self, hidden_states):
        residual = hidden_states
        batch, channel, height, width = hidden_states.shape

        # norm
        hidden_states = self.group_norm(hidden_states)

        # flatten the spatial dimensions: (B, C, H, W) -> (B, H * W, C)
        hidden_states = hidden_states.view(batch, channel, height * width).transpose(1, 2)

        # project to query, key, value
        query_proj = self.query(hidden_states)
        key_proj = self.key(hidden_states)
        value_proj = self.value(hidden_states)

        # split the projections into heads
        query_states = self.transpose_for_scores(query_proj)
        key_states = self.transpose_for_scores(key_proj)
        value_states = self.transpose_for_scores(value_proj)

        # the scale is applied to both query and key, so the product q @ k^T is effectively
        # divided by sqrt(head_dim) without forming large intermediate values
        scale = 1 / math.sqrt(math.sqrt(self.channels / self.num_heads))

        attention_scores = torch.matmul(query_states * scale, key_states.transpose(-1, -2) * scale)
        # compute the softmax in float32 for numerical stability, then cast back to the input dtype
        attention_probs = torch.softmax(attention_scores.float(), dim=-1).type(attention_scores.dtype)

        # compute the attention output
        hidden_states = torch.matmul(attention_probs, value_states)

        # merge the heads back: (B, H, T, D) -> (B, T, H, D) -> (B, T, C)
        hidden_states = hidden_states.permute(0, 2, 1, 3).contiguous()
        new_hidden_states_shape = hidden_states.size()[:-2] + (self.channels,)
        hidden_states = hidden_states.view(new_hidden_states_shape)

        # output projection and reshape back to (B, C, H, W)
        hidden_states = self.proj_attn(hidden_states)
        hidden_states = hidden_states.transpose(-1, -2).reshape(batch, channel, height, width)

        # residual connection and optional rescaling
        hidden_states = (hidden_states + residual) / self.rescale_output_factor
        return hidden_states


class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data. First, the input is projected (aka embedding) and reshaped to (b, t, d).
    Then standard transformer blocks are applied. Finally, the output is reshaped back to an image.

    Parameters:
        in_channels (:obj:`int`): The number of channels in the input and output.
        n_heads (:obj:`int`): The number of heads to use for multi-head attention.
        d_head (:obj:`int`): The number of channels in each head.
        depth (:obj:`int`, *optional*, defaults to 1): The number of Transformer blocks to use.
        dropout (:obj:`float`, *optional*, defaults to 0.0): The dropout probability to use.
        context_dim (:obj:`int`, *optional*): The number of context dimensions to use.
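
    Example:
        A minimal usage sketch; the channel count, head configuration, and context shape below are
        illustrative assumptions, not requirements::

            transformer = SpatialTransformer(in_channels=64, n_heads=2, d_head=32, context_dim=768)
            sample = torch.randn(1, 64, 16, 16)  # (batch, channel, height, width)
            context = torch.randn(1, 77, 768)    # e.g. text-encoder hidden states
            output = transformer(sample, context=context)  # same shape as `sample`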
""" |
|
|
|
def __init__( |
|
self, |
|
in_channels: int, |
|
n_heads: int, |
|
d_head: int, |
|
depth: int = 1, |
|
dropout = 0.0, |
|
context_dim: Optional[int] = None, |
|
): |
|
super().__init__() |
|
self.n_heads = n_heads |
|
self.d_head = d_head |
|
self.in_channels = in_channels |
|
inner_dim = n_heads * d_head |
|
self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) |
|
|
|
self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) |
|
|
|
self.transformer_blocks = nn.ModuleList( |
|
[ |
|
BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) |
|
for d in range(depth) |
|
] |
|
) |
|
|
|
self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) |
|
|
|
    def _set_attention_slice(self, slice_size):
        for block in self.transformer_blocks:
            block._set_attention_slice(slice_size)

    def forward(self, x, context=None):
        # note: if no context is given, cross-attention defaults to self-attention
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        x = self.proj_in(x)
        # proj_in may change the channel count from `in_channels` to `inner_dim`
        inner_dim = x.shape[1]
        x = x.permute(0, 2, 3, 1).reshape(b, h * w, inner_dim)
        for block in self.transformer_blocks:
            x = block(x, context=context)
        x = x.reshape(b, h, w, inner_dim).permute(0, 3, 1, 2)
        x = self.proj_out(x)
        return x + x_in


class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block.

    Parameters:
        dim (:obj:`int`): The number of channels in the input and output.
        n_heads (:obj:`int`): The number of heads to use for multi-head attention.
        d_head (:obj:`int`): The number of channels in each head.
        dropout (:obj:`float`, *optional*, defaults to 0.0): The dropout probability to use.
        context_dim (:obj:`int`, *optional*): The size of the context vector for cross attention.
        gated_ff (:obj:`bool`, *optional*, defaults to :obj:`True`): Whether to use a gated feed-forward network.
        checkpoint (:obj:`bool`, *optional*, defaults to :obj:`True`): Whether to use checkpointing.
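
    Example:
        A minimal usage sketch; the dimensions below are illustrative assumptions, not requirements::

            block = BasicTransformerBlock(dim=64, n_heads=2, d_head=32, context_dim=768)
            hidden_states = torch.randn(1, 256, 64)  # (batch, sequence_length, dim)
            context = torch.randn(1, 77, 768)        # e.g. text-encoder hidden states
            output = block(hidden_states, context=context)  # same shape as `hidden_states`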
""" |
|
|
|
def __init__( |
|
self, |
|
dim: int, |
|
n_heads: int, |
|
d_head: int, |
|
dropout=0.0, |
|
context_dim: Optional[int] = None, |
|
gated_ff: bool = True, |
|
checkpoint: bool = True, |
|
): |
|
super().__init__() |
|
self.attn1 = CrossAttention( |
|
query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout |
|
) |
|
self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) |
|
self.attn2 = CrossAttention( |
|
query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout |
|
) |
|
self.norm1 = nn.LayerNorm(dim) |
|
self.norm2 = nn.LayerNorm(dim) |
|
self.norm3 = nn.LayerNorm(dim) |
|
self.checkpoint = checkpoint |
|
|
|
def _set_attention_slice(self, slice_size): |
|
self.attn1._slice_size = slice_size |
|
self.attn2._slice_size = slice_size |
|
|
|
def forward(self, x, context=None): |
|
x = x.contiguous() if x.device.type == "mps" else x |
|
x = self.attn1(self.norm1(x)) + x |
|
x = self.attn2(self.norm2(x), context=context) + x |
|
x = self.ff(self.norm3(x)) + x |
|
return x |
|
|
|
|
|
class CrossAttention(nn.Module):
    r"""
    A cross attention layer.

    Parameters:
        query_dim (:obj:`int`): The number of channels in the query.
        context_dim (:obj:`int`, *optional*):
            The number of channels in the context. If not given, defaults to `query_dim`.
        heads (:obj:`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention.
        dim_head (:obj:`int`, *optional*, defaults to 64): The number of channels in each head.
        dropout (:obj:`float`, *optional*, defaults to 0.0): The dropout probability to use.
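
    Example:
        A minimal usage sketch; the dimensions below are illustrative assumptions, not requirements::

            attn = CrossAttention(query_dim=64, context_dim=768, heads=2, dim_head=32)
            hidden_states = torch.randn(1, 256, 64)  # (batch, sequence_length, query_dim)
            context = torch.randn(1, 77, 768)        # (batch, context_length, context_dim)
            output = attn(hidden_states, context=context)  # -> (1, 256, 64)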
""" |
|
|
|
def __init__( |
|
self, query_dim: int, context_dim: Optional[int] = None, heads: int = 8, dim_head: int = 64, dropout: int = 0.0 |
|
): |
|
super().__init__() |
|
inner_dim = dim_head * heads |
|
context_dim = context_dim if context_dim is not None else query_dim |
|
|
|
self.scale = dim_head**-0.5 |
|
self.heads = heads |
|
|
|
|
|
|
|
self._slice_size = None |
|
|
|
self.to_q = nn.Linear(query_dim, inner_dim, bias=False) |
|
self.to_k = nn.Linear(context_dim, inner_dim, bias=False) |
|
self.to_v = nn.Linear(context_dim, inner_dim, bias=False) |
|
|
|
self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)) |
|
|
|
    def reshape_heads_to_batch_dim(self, tensor):
        # (B, T, H * D) -> (B * H, T, D)
        batch_size, seq_len, dim = tensor.shape
        head_size = self.heads
        tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
        tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size * head_size, seq_len, dim // head_size)
        return tensor

    def reshape_batch_dim_to_heads(self, tensor):
        # (B * H, T, D) -> (B, T, H * D)
        batch_size, seq_len, dim = tensor.shape
        head_size = self.heads
        tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
        tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)
        return tensor

    def forward(self, x, context=None, mask=None):
        # note: `mask` is accepted for API compatibility but is not applied in this implementation
        batch_size, sequence_length, dim = x.shape

        q = self.to_q(x)
        # if no context is given, this layer performs self-attention
        context = context if context is not None else x
        k = self.to_k(context)
        v = self.to_v(context)

        q = self.reshape_heads_to_batch_dim(q)
        k = self.reshape_heads_to_batch_dim(k)
        v = self.reshape_heads_to_batch_dim(v)

        hidden_states = self._attention(q, k, v, sequence_length, dim)

        return self.to_out(hidden_states)

    def _attention(self, query, key, value, sequence_length, dim):
        batch_size_attention = query.shape[0]
        hidden_states = torch.zeros(
            (batch_size_attention, sequence_length, dim // self.heads), device=query.device, dtype=query.dtype
        )
        # when `_slice_size` is set, attention is computed for `slice_size` (batch * heads) rows at a time,
        # which bounds the size of the intermediate attention matrix
        slice_size = self._slice_size if self._slice_size is not None else hidden_states.shape[0]
        for i in range(hidden_states.shape[0] // slice_size):
            start_idx = i * slice_size
            end_idx = (i + 1) * slice_size
            attn_slice = (
                torch.einsum("b i d, b j d -> b i j", query[start_idx:end_idx], key[start_idx:end_idx]) * self.scale
            )
            attn_slice = attn_slice.softmax(dim=-1)
            attn_slice = torch.einsum("b i j, b j d -> b i d", attn_slice, value[start_idx:end_idx])

            hidden_states[start_idx:end_idx] = attn_slice

        # reshape back to (batch, sequence_length, heads * dim_head)
        hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
        return hidden_states


class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (:obj:`int`): The number of channels in the input.
        dim_out (:obj:`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (:obj:`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        glu (:obj:`bool`, *optional*, defaults to :obj:`False`): Whether to use GLU activation.
        dropout (:obj:`float`, *optional*, defaults to 0.0): The dropout probability to use.
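
    Example:
        A minimal usage sketch; the dimensions below are illustrative assumptions, not requirements::

            ff = FeedForward(dim=64, glu=True)
            hidden_states = torch.randn(1, 256, 64)  # (batch, sequence_length, dim)
            output = ff(hidden_states)  # -> (1, 256, 64)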
""" |
|
|
|
def __init__( |
|
self, dim: int, dim_out: Optional[int] = None, mult: int = 4, glu: bool = False, dropout = 0.0 |
|
): |
|
super().__init__() |
|
inner_dim = int(dim * mult) |
|
dim_out = dim_out if dim_out is not None else dim |
|
project_in = GEGLU(dim, inner_dim) |
|
|
|
self.net = nn.Sequential(project_in, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out)) |
|
|
|
def forward(self, x): |
|
return self.net(x) |
|
|
|
|
|
|
|
class GEGLU(nn.Module):
    r"""
    A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.

    Parameters:
        dim_in (:obj:`int`): The number of channels in the input.
        dim_out (:obj:`int`): The number of channels in the output.
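
    Example:
        A minimal usage sketch; the dimensions below are illustrative assumptions, not requirements::

            geglu = GEGLU(dim_in=64, dim_out=256)
            hidden_states = torch.randn(1, 256, 64)
            output = geglu(hidden_states)  # -> (1, 256, 256)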
""" |
|
|
|
def __init__(self, dim_in: int, dim_out: int): |
|
super().__init__() |
|
self.proj = nn.Linear(dim_in, dim_out * 2) |
|
|
|
def forward(self, x): |
|
x, gate = self.proj(x).chunk(2, dim=-1) |
|
return x * F.gelu(gate) |
|
|