teowu committed on
Commit
4ca9f48
1 Parent(s): 4638546

Upload visual_encoder.py with huggingface_hub

Files changed (1)
  1. visual_encoder.py +928 -0
visual_encoder.py ADDED
@@ -0,0 +1,928 @@
1
+ import math
2
+ from typing import Any, Optional, Tuple, Union
3
+
4
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, BaseModelOutputWithPastAndCrossAttentions
5
+ from transformers.modeling_utils import PreTrainedModel
6
+ from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
7
+
8
+ import numpy as np
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F  # F.interpolate is used by get_abs_pos below
+ import torch.utils.checkpoint
12
+ from icecream import ic
13
+
14
+ def get_abs_pos(abs_pos, tgt_size):
15
+ # abs_pos: L, C
16
+ # tgt_size: M
17
+ # return: M, C
18
+ src_size = int(math.sqrt(abs_pos.size(0)))
19
+ tgt_size = int(math.sqrt(tgt_size))
20
+ dtype = abs_pos.dtype
21
+
22
+ if src_size != tgt_size:
23
+ return F.interpolate(
24
+ abs_pos.float().reshape(1, src_size, src_size, -1).permute(0, 3, 1, 2),
25
+ size=(tgt_size, tgt_size),
26
+ mode="bicubic",
27
+ align_corners=False,
28
+ ).permute(0, 2, 3, 1).flatten(0, 2).to(dtype=dtype)
29
+ else:
30
+ return abs_pos
31
+
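+ # Illustrative shapes (this helper is not exercised in this file): a position table of
+ # shape (256, C) learned for a 16x16 patch grid, queried with tgt_size=1024, is
+ # bicubically resized to (1024, C) for a 32x32 grid; if the sizes already match it is
+ # returned unchanged.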
32
+ # https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8/util/pos_embed.py#L20
33
+ def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
34
+ """
35
+ grid_size: int of the grid height and width
36
+ return:
37
+ pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
38
+ """
39
+ grid_h = np.arange(grid_size, dtype=np.float32)
40
+ grid_w = np.arange(grid_size, dtype=np.float32)
41
+ grid = np.meshgrid(grid_w, grid_h) # here w goes first
42
+ grid = np.stack(grid, axis=0)
43
+
44
+ grid = grid.reshape([2, 1, grid_size, grid_size])
45
+ pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
46
+ if cls_token:
47
+ pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
48
+ return pos_embed
49
+
50
+
51
+ def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
52
+ assert embed_dim % 2 == 0
53
+
54
+ # use half of dimensions to encode grid_h
55
+ emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
56
+ emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
57
+
58
+ emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
59
+ return emb
60
+
61
+
62
+ def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
63
+ """
64
+ embed_dim: output dimension for each position
65
+ pos: a list of positions to be encoded: size (M,)
66
+ out: (M, D)
67
+ """
68
+ assert embed_dim % 2 == 0
69
+ omega = np.arange(embed_dim // 2, dtype=np.float32)
70
+ omega /= embed_dim / 2.
71
+ omega = 1. / 10000**omega # (D/2,)
72
+
73
+ pos = pos.reshape(-1) # (M,)
74
+ out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product
75
+
76
+ emb_sin = np.sin(out) # (M, D/2)
77
+ emb_cos = np.cos(out) # (M, D/2)
78
+
79
+ emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
80
+ return emb
81
+
82
+
83
+
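+ # For example, get_2d_sincos_pos_embed(768, 16) returns a (256, 768) array for a 16x16
+ # grid, or (257, 768) with cls_token=True; get_1d_sincos_pos_embed_from_grid(768,
+ # np.arange(64)) returns (64, 768). Both are plain NumPy arrays and are converted to
+ # tensors where they are registered as buffers further below.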
84
+ class MplugOwlVisionEmbeddings(nn.Module):
85
+ def __init__(self, config):
86
+ super().__init__()
87
+ self.config = config
88
+ self.hidden_size = config.hidden_size
89
+ self.image_size = config.image_size
90
+ self.patch_size = config.patch_size
91
+
92
+ self.cls_token = nn.Parameter(torch.randn(1, 1, self.hidden_size))
93
+
94
+ self.patch_embed = nn.Conv2d(
95
+ in_channels=3,
96
+ out_channels=self.hidden_size,
97
+ kernel_size=self.patch_size,
98
+ stride=self.patch_size,
99
+ bias=False,
100
+ )
101
+
102
+ self.num_patches = (self.image_size // self.patch_size) ** 2
103
+
104
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_patches + 1, self.hidden_size))
105
+
106
+ self.pre_layernorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps)
107
+
108
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
109
+ batch_size = pixel_values.size(0)
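+ # conv patchify: (B, 3, H, W) -> (B, hidden_size, H/patch, W/patch), then flatten and
+ # transpose to (B, num_patches, hidden_size) before prepending the CLS token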
110
+ image_embeds = self.patch_embed(pixel_values)
111
+ image_embeds = image_embeds.flatten(2).transpose(1, 2)
112
+
113
+ class_embeds = self.cls_token.expand(batch_size, 1, -1).to(image_embeds.dtype)
114
+ embeddings = torch.cat([class_embeds, image_embeds], dim=1)
115
+ embeddings = embeddings + self.position_embedding[:, : embeddings.size(1)].to(image_embeds.dtype)
116
+ embeddings = self.pre_layernorm(embeddings)
117
+ return embeddings
118
+
119
+
120
+
121
+ class MplugOwlVisionAttention(nn.Module):
122
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
123
+
124
+ def __init__(self, config):
125
+ super().__init__()
126
+ self.config = config
127
+ self.hidden_size = config.hidden_size
128
+ self.num_heads = config.num_attention_heads
129
+ self.head_dim = self.hidden_size // self.num_heads
130
+ if self.head_dim * self.num_heads != self.hidden_size:
131
+ raise ValueError(
132
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:"
133
+ f" {self.num_heads})."
134
+ )
135
+ self.scale = self.head_dim**-0.5
136
+ self.dropout = nn.Dropout(config.attention_dropout)
137
+
138
+ self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size)
139
+ self.dense = nn.Linear(self.hidden_size, self.hidden_size)
140
+
141
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
142
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
143
+
144
+ def forward(
145
+ self,
146
+ hidden_states: torch.Tensor,
147
+ head_mask: Optional[torch.Tensor] = None,
148
+ output_attentions: Optional[bool] = False,
149
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
150
+ """Input shape: Batch x Time x Channel"""
151
+
152
+ bsz, seq_len, embed_dim = hidden_states.size()
153
+
154
+ mixed_qkv = self.query_key_value(hidden_states)
155
+
156
+ mixed_qkv = mixed_qkv.reshape(bsz, seq_len, self.num_heads, 3, embed_dim // self.num_heads).permute(
157
+ 3, 0, 2, 1, 4
158
+ ) # [3, b, np, sq, hn]
159
+ query_states, key_states, value_states = (
160
+ mixed_qkv[0],
161
+ mixed_qkv[1],
162
+ mixed_qkv[2],
163
+ )
164
+ # if self.config.use_flash_attn and flash_attn_func is not None:
165
+ if False:  # flash-attention path disabled here: flash_attn_func is not imported in this file
166
+ # [b*sq, np, hn]
167
+ query_states = query_states.permute(0, 2, 1, 3).contiguous()
168
+ query_states = query_states.view(query_states.size(0) * query_states.size(1), query_states.size(2), -1)
169
+
170
+ key_states = key_states.permute(0, 2, 1, 3).contiguous()
171
+ key_states = key_states.view(key_states.size(0) * key_states.size(1), key_states.size(2), -1)
172
+
173
+ value_states = value_states.permute(0, 2, 1, 3).contiguous()
174
+ value_states = value_states.view(value_states.size(0) * value_states.size(1), value_states.size(2), -1)
175
+
176
+ cu_seqlens = torch.arange(
177
+ 0, (bsz + 1) * seq_len, step=seq_len, dtype=torch.int32, device=query_states.device
178
+ )
179
+
180
+ context_layer = flash_attn_func(
181
+ query_states,
182
+ key_states,
183
+ value_states,
184
+ cu_seqlens,
185
+ cu_seqlens,
186
+ seq_len,
187
+ seq_len,
188
+ self.dropout.p if self.training else 0.0,  # flash-attn expects a dropout probability, not the module
189
+ softmax_scale=self.scale,
190
+ causal=False,
191
+ return_attn_probs=False,
192
+ )
193
+ # [b*sq, np, hn] => [b, sq, np, hn]
194
+ context_layer = context_layer.view(bsz, seq_len, context_layer.size(1), context_layer.size(2))
195
+ else:
196
+ # Take the dot product between "query" and "key" to get the raw attention scores.
197
+ attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
198
+
199
+ attention_scores = attention_scores * self.scale
200
+
201
+ # Normalize the attention scores to probabilities.
202
+ attention_probs = torch.softmax(attention_scores, dim=-1)
203
+
204
+ # This is actually dropping out entire tokens to attend to, which might
205
+ # seem a bit unusual, but is taken from the original Transformer paper.
206
+ attention_probs = self.dropout(attention_probs)
207
+
208
+ # Mask heads if we want to
209
+ if head_mask is not None:
210
+ attention_probs = attention_probs * head_mask
211
+
212
+ context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
213
+
214
+ new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size,)
215
+ context_layer = context_layer.reshape(new_context_layer_shape)
216
+
217
+ output = self.dense(context_layer)
218
+
219
+ outputs = (output, attention_probs) if output_attentions else (output, None)
220
+
221
+ return outputs
222
+
223
+
224
+ class QuickGELU(nn.Module):
225
+ def forward(self, x: torch.Tensor):
226
+ return x * torch.sigmoid(1.702 * x)
227
+
228
+
229
+ class MplugOwlMLP(nn.Module):
230
+ def __init__(self, config):
231
+ super().__init__()
232
+ self.config = config
233
+ self.activation_fn = QuickGELU()
234
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
235
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
236
+
237
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
238
+ hidden_states = self.fc1(hidden_states)
239
+ hidden_states = self.activation_fn(hidden_states)
240
+ hidden_states = self.fc2(hidden_states)
241
+ return hidden_states
242
+
243
+
244
+ class MplugOwlVisionEncoderLayer(nn.Module):
245
+ def __init__(self, config):
246
+ super().__init__()
247
+ self.hidden_size = config.hidden_size
248
+ self.self_attn = MplugOwlVisionAttention(config)
249
+ self.input_layernorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps)
250
+ self.mlp = MplugOwlMLP(config)
251
+ self.post_attention_layernorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps)
252
+
253
+ def forward(
254
+ self,
255
+ hidden_states: torch.Tensor,
256
+ attention_mask: torch.Tensor,
257
+ output_attentions: Optional[bool] = False,
258
+ ) -> Tuple[torch.FloatTensor]:
259
+ """
260
+ Args:
261
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
262
+ attention_mask (`torch.FloatTensor`): attention mask of size
263
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
265
+ output_attentions (`bool`, *optional*):
266
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
267
+ returned tensors for more detail.
268
+ """
269
+ residual = hidden_states
270
+
271
+ hidden_states = self.input_layernorm(hidden_states)
272
+ hidden_states, attn_weights = self.self_attn(
273
+ hidden_states=hidden_states,
274
+ head_mask=attention_mask,
275
+ output_attentions=output_attentions,
276
+ )
277
+ hidden_states = hidden_states + residual
278
+ residual = hidden_states
279
+ hidden_states = self.post_attention_layernorm(hidden_states)
280
+ hidden_states = self.mlp(hidden_states)
281
+
282
+ hidden_states = hidden_states + residual
283
+
284
+ outputs = (hidden_states,)
285
+
286
+ if output_attentions:
287
+ outputs += (attn_weights,)
288
+
289
+ return outputs
290
+
291
+
292
+ class MplugOwlVisionEncoder(nn.Module):
293
+ """
294
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
295
+ [`MplugOwlVisionEncoderLayer`].
296
+
297
+ Args:
298
+ config (`MplugOwlVisionConfig`):
299
+ The corresponding vision configuration for the `MplugOwlEncoder`.
300
+ """
301
+
302
+ def __init__(self, config):
303
+ super().__init__()
304
+ self.config = config
305
+ self.layers = nn.ModuleList([MplugOwlVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
306
+ self.gradient_checkpointing = True
307
+
308
+ def forward(
309
+ self,
310
+ inputs_embeds,
311
+ attention_mask: Optional[torch.Tensor] = None,
312
+ output_attentions: Optional[bool] = None,
313
+ output_hidden_states: Optional[bool] = None,
314
+ return_dict: Optional[bool] = None,
315
+ ) -> Union[Tuple, BaseModelOutput]:
316
+ r"""
317
+ Args:
318
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
319
+ Embedded representation of the inputs. Should be float, not int tokens.
320
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
321
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
322
+
323
+ - 1 for tokens that are **not masked**,
324
+ - 0 for tokens that are **masked**.
325
+
326
+ [What are attention masks?](../glossary#attention-mask)
327
+ output_attentions (`bool`, *optional*):
328
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
329
+ returned tensors for more detail.
330
+ output_hidden_states (`bool`, *optional*):
331
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
332
+ for more detail.
333
+ return_dict (`bool`, *optional*):
334
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
335
+ """
336
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
337
+ output_hidden_states = (
338
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
339
+ )
340
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
341
+
342
+ encoder_states = () if output_hidden_states else None
343
+ all_attentions = () if output_attentions else None
344
+
345
+ hidden_states = inputs_embeds
346
+ for idx, encoder_layer in enumerate(self.layers):
347
+ if output_hidden_states:
348
+ encoder_states = encoder_states + (hidden_states,)
349
+ if self.gradient_checkpointing and self.training:
350
+
351
+ def create_custom_forward(module):
352
+ def custom_forward(*inputs):
353
+ return module(*inputs, output_attentions)
354
+
355
+ return custom_forward
356
+
357
+ layer_outputs = torch.utils.checkpoint.checkpoint(
358
+ create_custom_forward(encoder_layer),
359
+ hidden_states,
360
+ attention_mask,
361
+ )
362
+ else:
363
+ layer_outputs = encoder_layer(
364
+ hidden_states,
365
+ attention_mask,
366
+ output_attentions=output_attentions,
367
+ )
368
+
369
+ hidden_states = layer_outputs[0]
370
+
371
+ if output_attentions:
372
+ all_attentions = all_attentions + (layer_outputs[1],)
373
+
374
+ if output_hidden_states:
375
+ encoder_states = encoder_states + (hidden_states,)
376
+
377
+ if not return_dict:
378
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
379
+ return BaseModelOutput(
380
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
381
+ )
382
+
383
+
384
+ class MplugOwlVisionModel(PreTrainedModel):
385
+ main_input_name = "pixel_values"
386
+
387
+ def __init__(self, config):
388
+ super().__init__(config)
389
+ self.config = config
390
+ self.hidden_size = config.hidden_size
391
+
392
+ self.embeddings = MplugOwlVisionEmbeddings(config)
393
+ self.encoder = MplugOwlVisionEncoder(config)
394
+ self.post_layernorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps)
395
+
396
+ self.post_init()
397
+
398
+
399
+ def forward(
400
+ self,
401
+ pixel_values: Optional[torch.FloatTensor] = None,
402
+ output_attentions: Optional[bool] = None,
403
+ output_hidden_states: Optional[bool] = None,
404
+ return_dict: Optional[bool] = None,
405
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
406
+ r"""
407
+ Returns:
408
+
409
+ """
410
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
411
+ output_hidden_states = (
412
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
413
+ )
414
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
415
+
416
+ if pixel_values is None:
417
+ raise ValueError("You have to specify pixel_values")
418
+
419
+ hidden_states = self.embeddings(pixel_values)
420
+
421
+ encoder_outputs = self.encoder(
422
+ inputs_embeds=hidden_states,
423
+ output_attentions=output_attentions,
424
+ output_hidden_states=output_hidden_states,
425
+ return_dict=return_dict,
426
+ )
427
+
428
+ last_hidden_state = encoder_outputs[0]
429
+ last_hidden_state = self.post_layernorm(last_hidden_state)
430
+
431
+ pooled_output = last_hidden_state[:, 0, :]
432
+ pooled_output = self.post_layernorm(pooled_output)
433
+
434
+ if not return_dict:
435
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
436
+
437
+ return BaseModelOutputWithPooling(
438
+ last_hidden_state=last_hidden_state,
439
+ pooler_output=pooled_output,
440
+ hidden_states=encoder_outputs.hidden_states,
441
+ attentions=encoder_outputs.attentions,
442
+ )
443
+
444
+ def get_input_embeddings(self):
445
+ return self.embeddings
446
+
447
+
448
+ class MplugOwlVisualAbstractorMLP(nn.Module):
449
+ def __init__(self, config):
450
+ super().__init__()
451
+ self.config = config
452
+ in_features = config.hidden_size
453
+ self.act = nn.SiLU()
454
+
455
+ self.w1 = nn.Linear(in_features, config.intermediate_size)
456
+ self.w2 = nn.Linear(config.intermediate_size, in_features)
457
+ self.w3 = nn.Linear(in_features, config.intermediate_size)
458
+ self.ffn_ln = nn.LayerNorm(config.intermediate_size, eps=config.layer_norm_eps)
459
+
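+ # Gated feed-forward (SwiGLU-style): SiLU(w1(x)) * w3(x), LayerNorm over the gated
+ # intermediate, then w2 projects back to the hidden size.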
460
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
461
+ hidden_states = self.act(self.w1(hidden_states)) * self.w3(hidden_states)
462
+ hidden_states = self.ffn_ln(hidden_states)
463
+ hidden_states = self.w2(hidden_states)
464
+ return hidden_states
465
+
466
+
467
+ class MplugOwlVisualAbstractorMultiHeadAttention(nn.Module):
468
+ def __init__(self, config):
469
+ super().__init__()
470
+ self.config = config
471
+ if config.hidden_size % config.num_attention_heads != 0:
472
+ raise ValueError(
473
+ "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
474
+ % (config.hidden_size, config.num_attention_heads)
475
+ )
476
+
477
+ self.num_attention_heads = config.num_attention_heads
478
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
479
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
480
+
481
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
482
+ self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
483
+ self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
484
+
485
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
486
+ self.save_attention = False
487
+
488
+ # self.q_pos_embed = nn.Parameter(
489
+ # torch.from_numpy(get_1d_sincos_pos_embed_from_grid(config.hidden_size, np.arange(config.num_learnable_queries, dtype=np.float32))).float()
490
+ # ).requires_grad_(False)
491
+ # grids = config.grid_size
492
+ # self.k_pos_embed = nn.Parameter(
493
+ # torch.from_numpy(get_2d_sincos_pos_embed(config.hidden_size, grids, cls_token=True)).float()
494
+ # ).requires_grad_(False)
495
+ grids = config.grid_size
496
+ self.register_buffer(
497
+ 'q_pos_embed',
498
+ torch.from_numpy(get_1d_sincos_pos_embed_from_grid(config.hidden_size, np.arange(config.num_learnable_queries, dtype=np.float32))).float()
499
+ )
500
+ self.register_buffer(
501
+ 'k_pos_embed',
502
+ torch.from_numpy(get_2d_sincos_pos_embed(config.hidden_size, grids, cls_token=True)).float()
503
+ )
504
+
505
+
506
+ def save_attn_gradients(self, attn_gradients):
507
+ self.attn_gradients = attn_gradients
508
+
509
+ def get_attn_gradients(self):
510
+ return self.attn_gradients
511
+
512
+ def save_attention_map(self, attention_map):
513
+ self.attention_map = attention_map
514
+
515
+ def get_attention_map(self):
516
+ return self.attention_map
517
+
518
+ def transpose_for_scores(self, x):
519
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
520
+ x = x.view(*new_x_shape)
521
+ return x.permute(0, 2, 1, 3)
522
+
523
+ def forward(
524
+ self,
525
+ hidden_states,
526
+ attention_mask=None,
527
+ head_mask=None,
528
+ encoder_hidden_states=None,
529
+ encoder_attention_mask=None,
530
+ past_key_value=None,
531
+ output_attentions=False,
532
+ ):
533
+ # If this is instantiated as a cross-attention module, the keys
534
+ # and values come from an encoder; the attention mask needs to be
535
+ # such that the encoder's padding tokens are not attended to.
536
+
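+ # Fixed (non-learnable) sin-cos position encodings: [q_pos; k_pos] lines up with the
+ # concatenated [query; image-token] sequence built by the caller and is added to the
+ # key input; q_pos alone is added to the query input; values get no position signal.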
537
+ qk_pos_embed = torch.cat([self.q_pos_embed, self.k_pos_embed], dim = 0).unsqueeze(0).to(dtype=hidden_states.dtype)
538
+
539
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states + qk_pos_embed))
540
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
541
+ attention_mask = encoder_attention_mask
542
+
543
+ mixed_query_layer = self.query(hidden_states + self.q_pos_embed.unsqueeze(0).to(dtype=hidden_states.dtype))
544
+
545
+ query_layer = self.transpose_for_scores(mixed_query_layer)
546
+
547
+ past_key_value = (key_layer, value_layer)
548
+
549
+ # Take the dot product between "query" and "key" to get the raw attention scores.
550
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
551
+
552
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
553
+
554
+ if attention_mask is not None:
555
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
556
+ attention_scores = attention_scores + attention_mask
557
+
558
+ # Normalize the attention scores to probabilities.
559
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
560
+
561
+ if self.save_attention:
562
+ self.save_attention_map(attention_probs)
563
+ attention_probs.register_hook(self.save_attn_gradients)
564
+
565
+ # This is actually dropping out entire tokens to attend to, which might
566
+ # seem a bit unusual, but is taken from the original Transformer paper.
567
+ attention_probs_dropped = self.dropout(attention_probs)
568
+
569
+ # Mask heads if we want to
570
+ if head_mask is not None:
571
+ attention_probs_dropped = attention_probs_dropped * head_mask
572
+
573
+ context_layer = torch.matmul(attention_probs_dropped, value_layer)
574
+
575
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
576
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
577
+ context_layer = context_layer.view(*new_context_layer_shape)
578
+
579
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
580
+
581
+ outputs = outputs + (past_key_value,)
582
+ return outputs
583
+
584
+
585
+ class MplugOwlVisualAbstractorCrossOutput(nn.Module):
586
+ def __init__(self, config):
587
+ super().__init__()
588
+ dim = config.hidden_size
589
+ self.out_proj = nn.Linear(dim, dim, bias=True)
590
+ self.norm2 = nn.LayerNorm(dim)
591
+ self.mlp = MplugOwlVisualAbstractorMLP(config)
592
+
593
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
594
+ input_tensor = input_tensor + self.out_proj(hidden_states)
595
+ input_tensor = input_tensor + self.mlp(self.norm2(input_tensor))
596
+ return input_tensor
597
+
598
+
599
+ class MplugOwlVisualAbstractorAttention(nn.Module):
600
+ def __init__(self, config):
601
+ super().__init__()
602
+ self.attention = MplugOwlVisualAbstractorMultiHeadAttention(config)
603
+ self.output = MplugOwlVisualAbstractorCrossOutput(config)
604
+ self.pruned_heads = set()
605
+ self.norm1 = nn.LayerNorm(config.hidden_size)
606
+ self.normk = nn.LayerNorm(config.hidden_size)
607
+
608
+ def prune_heads(self, heads):
609
+ if len(heads) == 0:
610
+ return
611
+ heads, index = find_pruneable_heads_and_indices(
612
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
613
+ )
614
+
615
+ # Prune linear layers
616
+ self.attention.query = prune_linear_layer(self.attention.query, index)
617
+ self.attention.key = prune_linear_layer(self.attention.key, index)
618
+ self.attention.value = prune_linear_layer(self.attention.value, index)
619
+ self.output.out_proj = prune_linear_layer(self.output.out_proj, index, dim=1)
620
+
621
+ # Update hyper params and store pruned heads
622
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
623
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
624
+ self.pruned_heads = self.pruned_heads.union(heads)
625
+
626
+ def forward(
627
+ self,
628
+ hidden_states: torch.Tensor,
629
+ attention_mask: Optional[torch.FloatTensor] = None,
630
+ head_mask: Optional[torch.FloatTensor] = None,
631
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
632
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
633
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
634
+ output_attentions: Optional[bool] = False,
635
+ ) -> Tuple[torch.Tensor]:
636
+ # HACK we apply norm on q and k
637
+ hidden_states = self.norm1(hidden_states)
638
+ encoder_hidden_states = self.normk(encoder_hidden_states)
639
+ encoder_hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1)
640
+ encoder_attention_mask = torch.cat([attention_mask, encoder_attention_mask], dim=-1)
641
+ self_outputs = self.attention(
642
+ hidden_states,
643
+ attention_mask,
644
+ head_mask,
645
+ encoder_hidden_states,
646
+ encoder_attention_mask,
647
+ past_key_value,
648
+ output_attentions,
649
+ )
650
+ attention_output = self.output(self_outputs[0], hidden_states)
651
+ # add attentions if we output them
652
+ outputs = (attention_output,) + self_outputs[1:]
653
+ return outputs
654
+
655
+
656
+ class MplugOwlVisualAbstractorLayer(nn.Module):
657
+ def __init__(self, config, layer_idx):
658
+ super().__init__()
659
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
660
+ self.seq_len_dim = 1
661
+
662
+ self.layer_idx = layer_idx
663
+
664
+ self.crossattention = MplugOwlVisualAbstractorAttention(config)
665
+ self.has_cross_attention = True
666
+
667
+ def forward(
668
+ self,
669
+ hidden_states,
670
+ attention_mask=None,
671
+ head_mask=None,
672
+ encoder_hidden_states=None,
673
+ encoder_attention_mask=None,
674
+ output_attentions=False,
675
+ ):
676
+ if encoder_hidden_states is None:
677
+ raise ValueError("encoder_hidden_states must be given for cross-attention layers")
678
+ cross_attention_outputs = self.crossattention(
679
+ hidden_states,
680
+ attention_mask,
681
+ head_mask,
682
+ encoder_hidden_states,
683
+ encoder_attention_mask,
684
+ output_attentions=output_attentions,
685
+ )
686
+ query_attention_output = cross_attention_outputs[0]
687
+
688
+ outputs = (query_attention_output,)
689
+ return outputs
690
+
691
+
692
+ class MplugOwlVisualAbstractorEncoder(nn.Module):
693
+ def __init__(self, config):
694
+ super().__init__()
695
+ self.config = config
696
+ self.layers = nn.ModuleList(
697
+ [MplugOwlVisualAbstractorLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
698
+ )
699
+ self.gradient_checkpointing = True
700
+
701
+ def forward(
702
+ self,
703
+ hidden_states,
704
+ attention_mask=None,
705
+ head_mask=None,
706
+ encoder_hidden_states=None,
707
+ encoder_attention_mask=None,
708
+ past_key_values=None,
709
+ output_attentions=False,
710
+ output_hidden_states=False,
711
+ return_dict=True,
712
+ ):
713
+ all_hidden_states = () if output_hidden_states else None
714
+
715
+ for i in range(self.config.num_hidden_layers):
716
+ layer_module = self.layers[i]
717
+ if output_hidden_states:
718
+ all_hidden_states = all_hidden_states + (hidden_states,)
719
+
720
+ layer_head_mask = head_mask[i] if head_mask is not None else None
721
+ past_key_value = past_key_values[i] if past_key_values is not None else None
722
+
723
+ if getattr(self.config, "gradient_checkpointing", False) and self.training:
724
+
725
+ def create_custom_forward(module):
726
+ def custom_forward(*inputs):
727
+ return module(*inputs, past_key_value, output_attentions)
728
+
729
+ return custom_forward
730
+
731
+ layer_outputs = torch.utils.checkpoint.checkpoint(
732
+ create_custom_forward(layer_module),
733
+ hidden_states,
734
+ attention_mask,
735
+ layer_head_mask,
736
+ encoder_hidden_states,
737
+ encoder_attention_mask,
738
+ )
739
+ else:
740
+ layer_outputs = layer_module(
741
+ hidden_states,
742
+ attention_mask,
743
+ layer_head_mask,
744
+ encoder_hidden_states,
745
+ encoder_attention_mask,
746
+ output_attentions,
747
+ )
748
+
749
+ hidden_states = layer_outputs[0]
750
+
751
+ return BaseModelOutput(
752
+ last_hidden_state=hidden_states,
753
+ )
754
+
755
+
756
+ class MplugOwlVisualAbstractorModel(PreTrainedModel):
757
+ def __init__(self, config, language_hidden_size):
758
+ super().__init__(config)
759
+ self.config = config
760
+
761
+ self.encoder = MplugOwlVisualAbstractorEncoder(config)
762
+ self.visual_fc = torch.nn.Linear(config.hidden_size, language_hidden_size)
763
+ self.query_embeds = torch.nn.Parameter(torch.randn(1, config.num_learnable_queries, config.hidden_size))
764
+ self.vit_eos = torch.nn.Parameter(torch.randn(1, 1, language_hidden_size))
765
+
766
+ self.post_init()
767
+
768
+ def _prune_heads(self, heads_to_prune):
769
+ """
770
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
771
+ class PreTrainedModel
772
+ """
773
+ for layer, heads in heads_to_prune.items():
774
+ self.encoder.layers[layer].crossattention.prune_heads(heads)
775
+
776
+ def get_extended_attention_mask(
777
+ self,
778
+ attention_mask: torch.Tensor,
779
+ input_shape: Tuple[int],
780
+ device: torch.device,
781
+ ) -> torch.Tensor:
782
+ """
783
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
784
+
785
+ Arguments:
786
+ attention_mask (`torch.Tensor`):
787
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
788
+ input_shape (`Tuple[int]`):
789
+ The shape of the input to the model.
790
+ device: (`torch.device`):
791
+ The device of the input to the model.
792
+
793
+ Returns:
794
+ `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
795
+ """
796
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
797
+ # ourselves in which case we just need to make it broadcastable to all heads.
798
+ if attention_mask.dim() == 3:
799
+ extended_attention_mask = attention_mask[:, None, :, :]
800
+ elif attention_mask.dim() == 2:
801
+ # Provided a padding mask of dimensions [batch_size, seq_length]
802
+ # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
803
+ extended_attention_mask = attention_mask[:, None, None, :]
804
+ else:
805
+ raise ValueError(
806
+ "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
807
+ input_shape, attention_mask.shape
808
+ )
809
+ )
810
+
811
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
812
+ # masked positions, this operation will create a tensor which is 0.0 for
813
+ # positions we want to attend and -10000.0 for masked positions.
814
+ # Since we are adding it to the raw scores before the softmax, this is
815
+ # effectively the same as removing these entirely.
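+ # e.g. a padding mask [1, 1, 0] becomes the additive mask [0.0, 0.0, -10000.0].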
816
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
817
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
818
+ return extended_attention_mask
819
+
820
+ def forward(
821
+ self,
822
+ attention_mask=None,
823
+ head_mask=None,
824
+ encoder_hidden_states=None,
825
+ encoder_attention_mask=None,
826
+ past_key_values=None,
827
+ output_attentions=None,
828
+ output_hidden_states=None,
829
+ return_dict=None,
830
+ ):
831
+ r"""
832
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`):
833
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
834
+ the model is configured as a decoder.
835
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`):
836
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
837
+ the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
838
+ - 1 for tokens that are **not masked**,
839
+ - 0 for tokens that are **masked**.
840
+ past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of:
841
+ shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and
842
+ value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are
843
+ used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key
844
+ value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
845
+ `(batch_size, sequence_length)`.
846
+ """
847
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
848
+ output_hidden_states = (
849
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
850
+ )
851
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
852
+
853
+ query_embeds = self.query_embeds.repeat(encoder_hidden_states.shape[0], 1, 1)
854
+ embedding_output = query_embeds
855
+ input_shape = embedding_output.size()[:-1]
856
+ batch_size, seq_length = input_shape
857
+ device = embedding_output.device
858
+
859
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
860
+ # ourselves in which case we just need to make it broadcastable to all heads.
861
+ if attention_mask is None:
862
+ attention_mask = torch.ones(
863
+ (query_embeds.shape[0], query_embeds.shape[1]), dtype=torch.long, device=query_embeds.device
864
+ )
865
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
866
+
867
+ # If a 2D or 3D attention mask is provided for the cross-attention
868
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
869
+ if encoder_hidden_states is not None:
870
+ if type(encoder_hidden_states) == list:
871
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
872
+ else:
873
+ (
874
+ encoder_batch_size,
875
+ encoder_sequence_length,
876
+ _,
877
+ ) = encoder_hidden_states.size()
878
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
879
+
880
+ if type(encoder_attention_mask) == list:
881
+ encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
882
+ elif encoder_attention_mask is None:
883
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
884
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
885
+ else:
886
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
887
+ else:
888
+ encoder_extended_attention_mask = None
889
+
890
+ # Prepare head mask if needed
891
+ # 1.0 in head_mask indicate we keep the head
892
+ # attention_probs has shape bsz x n_heads x N x N
893
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
894
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
895
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
896
+
897
+ encoder_outputs = self.encoder(
898
+ embedding_output,
899
+ attention_mask=extended_attention_mask,
900
+ head_mask=head_mask,
901
+ encoder_hidden_states=encoder_hidden_states,
902
+ encoder_attention_mask=encoder_extended_attention_mask,
903
+ past_key_values=past_key_values,
904
+ output_attentions=output_attentions,
905
+ output_hidden_states=output_hidden_states,
906
+ return_dict=return_dict,
907
+ )
908
+ sequence_output = encoder_outputs[0]
909
+ pooled_output = sequence_output[:, 0, :]
910
+
911
+ sequence_output = self.visual_fc(sequence_output)
912
+ sequence_output = torch.cat([sequence_output, self.vit_eos.repeat(sequence_output.shape[0], 1, 1)], dim=1)
913
+
914
+ return BaseModelOutputWithPooling(
915
+ last_hidden_state=sequence_output,
916
+ pooler_output=pooled_output,
917
+ hidden_states=encoder_outputs.hidden_states,
918
+ )
919
+
920
+
921
+ if __name__ == "__main__":
922
+ from configuration_mplug_owl2 import MPLUGOwl2Config
923
+ config = MPLUGOwl2Config()
924
+ visual_model = MplugOwlVisionModel(config.visual_config["visual_model"])
925
+ print(visual_model)
926
+
927
+ abstractor_module = MplugOwlVisualAbstractorModel(config.visual_config["visual_abstractor"], config.hidden_size)
928
+ print(abstractor_module)
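+
+ # A minimal smoke test, as a sketch: it assumes the default MPLUGOwl2Config keeps the
+ # vision hidden size, the abstractor's encoder_hidden_size and its grid_size mutually
+ # consistent (as in the released mPLUG-Owl2 checkpoints); names below are illustrative.
+ visual_model.eval()
+ abstractor_module.eval()
+ image_size = visual_model.config.image_size
+ dummy_pixels = torch.randn(1, 3, image_size, image_size)
+ with torch.no_grad():
+ vision_out = visual_model(dummy_pixels)
+ abstract_out = abstractor_module(encoder_hidden_states=vision_out.last_hidden_state)
+ print(vision_out.last_hidden_state.shape, abstract_out.last_hidden_state.shape)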