import math

import torch
import torch.nn as nn
from einops import rearrange
from einops.layers.torch import Rearrange


def FeedForward(dim, mult=4):
    inner_dim = int(dim * mult)
    return nn.Sequential(
        nn.LayerNorm(dim),
        nn.Linear(dim, inner_dim, bias=False),
        nn.GELU(),
        nn.Linear(inner_dim, dim, bias=False),
    )


def reshape_tensor(x, heads):
    bs, length, width = x.shape
    # (bs, length, heads * dim_per_head) -> (bs, length, heads, dim_per_head)
    x = x.view(bs, length, heads, -1)
    # (bs, length, heads, dim_per_head) -> (bs, heads, length, dim_per_head)
    x = x.transpose(1, 2)
    # make contiguous: (bs, heads, length, dim_per_head)
    x = x.reshape(bs, heads, length, -1)
    return x
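
# Shape sketch for reshape_tensor (assumed toy sizes): a (b, n, heads * d) tensor
# becomes (b, heads, n, d), e.g.
#
#     y = reshape_tensor(torch.randn(2, 10, 8 * 64), heads=8)
#     assert y.shape == (2, 8, 10, 64)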


class PerceiverAttention(nn.Module):
    def __init__(self, *, dim, dim_head=64, heads=8):
        super().__init__()
        self.scale = dim_head ** -0.5
        self.dim_head = dim_head
        self.heads = heads
        inner_dim = dim_head * heads

        self.norm1 = nn.LayerNorm(dim)
        self.norm2 = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, x, latents):
        """
        Args:
            x (torch.Tensor): image features
                shape (b, n1, D)
            latents (torch.Tensor): latent features
                shape (b, n2, D)
        """
        x = self.norm1(x)
        latents = self.norm2(latents)

        b, l, _ = latents.shape

        # Queries come from the latents; keys/values attend over the
        # concatenation of image features and latents.
        q = self.to_q(latents)
        kv_input = torch.cat((x, latents), dim=-2)
        k, v = self.to_kv(kv_input).chunk(2, dim=-1)

        q = reshape_tensor(q, self.heads)
        k = reshape_tensor(k, self.heads)
        v = reshape_tensor(v, self.heads)

        # Split the 1/sqrt(dim_head) scaling across q and k; this is more
        # numerically stable in low precision than scaling the product.
        scale = 1 / math.sqrt(math.sqrt(self.dim_head))
        weight = (q * scale) @ (k * scale).transpose(-2, -1)
        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
        out = weight @ v

        out = out.permute(0, 2, 1, 3).reshape(b, l, -1)

        return self.to_out(out)
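
# Illustrative usage sketch for PerceiverAttention (assumed shapes, not tied to a
# specific pipeline): image features (b, n1, dim) and latents (b, n2, dim) yield
# updated latents of shape (b, n2, dim), e.g.
#
#     attn = PerceiverAttention(dim=1024, dim_head=64, heads=16)
#     feats = torch.randn(2, 257, 1024)
#     lats = torch.randn(2, 8, 1024)
#     assert attn(feats, lats).shape == (2, 8, 1024)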


class PerceiverResampler(nn.Module):
    def __init__(
        self,
        *,
        dim=1024,
        depth=8,
        dim_head=64,
        heads=16,
        num_latents=8,
        embedding_dim=768,
        output_dim=1024,
        ff_mult=4,
    ):
        super().__init__()

        self.latents = nn.Parameter(torch.randn(1, num_latents, dim) / dim ** 0.5)

        self.proj_in = nn.Linear(embedding_dim, dim)

        self.proj_out = nn.Linear(dim, output_dim)
        self.norm_out = nn.LayerNorm(output_dim)

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
                        FeedForward(dim=dim, mult=ff_mult),
                    ]
                )
            )

    def forward(self, x):
        latents = self.latents.repeat(x.size(0), 1, 1)

        x = self.proj_in(x)

        for attn, ff in self.layers:
            latents = attn(x, latents) + latents
            latents = ff(latents) + latents

        latents = self.proj_out(latents)
        return self.norm_out(latents)


class FacePerceiverResampler(nn.Module):
    def __init__(
        self,
        *,
        dim=768,
        depth=4,
        dim_head=64,
        heads=16,
        embedding_dim=1280,
        output_dim=768,
        ff_mult=4,
    ):
        super().__init__()

        self.proj_in = nn.Linear(embedding_dim, dim)

        self.proj_out = nn.Linear(dim, output_dim)
        self.norm_out = nn.LayerNorm(output_dim)

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
                        FeedForward(dim=dim, mult=ff_mult),
                    ]
                )
            )

    def forward(self, latents, x):
        x = self.proj_in(x)

        for attn, ff in self.layers:
            latents = attn(x, latents) + latents
            latents = ff(latents) + latents

        latents = self.proj_out(latents)
        return self.norm_out(latents)


class Resampler(nn.Module):
    def __init__(
        self,
        dim=1024,
        depth=8,
        dim_head=64,
        heads=16,
        num_queries=8,
        embedding_dim=768,
        output_dim=1024,
        ff_mult=4,
        max_seq_len: int = 257,
        apply_pos_emb: bool = False,
        num_latents_mean_pooled: int = 0,
    ):
        super().__init__()
        self.pos_emb = nn.Embedding(max_seq_len, embedding_dim) if apply_pos_emb else None

        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim ** 0.5)

        self.proj_in = nn.Linear(embedding_dim, dim)

        self.proj_out = nn.Linear(dim, output_dim)
        self.norm_out = nn.LayerNorm(output_dim)

        # Optionally derive extra latents from the mean-pooled input sequence.
        self.to_latents_from_mean_pooled_seq = (
            nn.Sequential(
                nn.LayerNorm(dim),
                nn.Linear(dim, dim * num_latents_mean_pooled),
                Rearrange("b (n d) -> b n d", n=num_latents_mean_pooled),
            )
            if num_latents_mean_pooled > 0
            else None
        )

        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
                        FeedForward(dim=dim, mult=ff_mult),
                    ]
                )
            )

    def forward(self, x):
        if self.pos_emb is not None:
            n, device = x.shape[1], x.device
            pos_emb = self.pos_emb(torch.arange(n, device=device))
            x = x + pos_emb

        latents = self.latents.repeat(x.size(0), 1, 1)

        x = self.proj_in(x)

        if self.to_latents_from_mean_pooled_seq is not None:
            meanpooled_seq = masked_mean(x, dim=1, mask=torch.ones(x.shape[:2], device=x.device, dtype=torch.bool))
            meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)
            latents = torch.cat((meanpooled_latents, latents), dim=-2)

        for attn, ff in self.layers:
            latents = attn(x, latents) + latents
            latents = ff(latents) + latents

        latents = self.proj_out(latents)
        return self.norm_out(latents)


def masked_mean(t, *, dim, mask=None):
    if mask is None:
        return t.mean(dim=dim)

    denom = mask.sum(dim=dim, keepdim=True)
    mask = rearrange(mask, "b n -> b n 1")
    masked_t = t.masked_fill(~mask, 0.0)

    return masked_t.sum(dim=dim) / denom.clamp(min=1e-5)
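
# Illustrative sketch of masked_mean semantics (assumed toy values): padded
# positions are excluded from the average, e.g.
#
#     t = torch.tensor([[[1.0], [3.0], [100.0]]])   # (b=1, n=3, d=1)
#     mask = torch.tensor([[True, True, False]])    # last token is padding
#     masked_mean(t, dim=1, mask=mask)              # -> tensor([[2.0]])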


class ProjPlusModel(torch.nn.Module):
    def __init__(self, cross_attention_dim=768, id_embeddings_dim=512, clip_embeddings_dim=1280, num_tokens=4):
        super().__init__()

        self.cross_attention_dim = cross_attention_dim
        self.num_tokens = num_tokens

        self.proj = torch.nn.Sequential(
            torch.nn.Linear(id_embeddings_dim, id_embeddings_dim * 2),
            torch.nn.GELU(),
            torch.nn.Linear(id_embeddings_dim * 2, cross_attention_dim * num_tokens),
        )
        self.norm = torch.nn.LayerNorm(cross_attention_dim)

        self.perceiver_resampler = FacePerceiverResampler(
            dim=cross_attention_dim,
            depth=4,
            dim_head=64,
            heads=cross_attention_dim // 64,
            embedding_dim=clip_embeddings_dim,
            output_dim=cross_attention_dim,
            ff_mult=4,
        )

    def forward(self, id_embeds, clip_embeds, shortcut=False, scale=1.0):
        # Project the ID embedding into `num_tokens` cross-attention tokens,
        # then refine them against the CLIP image embeddings.
        x = self.proj(id_embeds)
        x = x.reshape(-1, self.num_tokens, self.cross_attention_dim)
        x = self.norm(x)
        out = self.perceiver_resampler(x, clip_embeds)
        if shortcut:
            out = x + scale * out
        return out


if __name__ == "__main__":
    model = PerceiverResampler(
        dim=1024,
        depth=8,
        dim_head=64,
        heads=16,
        num_latents=8,
        embedding_dim=4096,
        output_dim=1024,
        ff_mult=4,
    )

    x = torch.rand(2, 77, 4096)

    with torch.no_grad():
        out = model(x)
    print(out.shape)

    print(sum(p.numel() for p in model.parameters()) / 1e6)
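
    # Illustrative smoke test for ProjPlusModel with its default dimensions
    # (id_embeddings_dim=512, clip_embeddings_dim=1280); the tensor shapes below
    # are assumptions for the sketch, not values from a real pipeline.
    proj_plus = ProjPlusModel()
    id_embeds = torch.rand(2, 512)
    clip_embeds = torch.rand(2, 257, 1280)
    with torch.no_grad():
        tokens = proj_plus(id_embeds, clip_embeds, shortcut=True, scale=1.0)
    print(tokens.shape)  # expected: torch.Size([2, 4, 768])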