""" PyTorch DINOv2 model.""" |
|
|
|
|
|
import collections.abc |
|
import math |
|
from typing import Dict, List, Optional, Set, Tuple, Union |
|
|
|
import torch |
|
import torch.utils.checkpoint |
|
from torch import nn |
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss |
|
|
|
from transformers.activations import ACT2FN |
|
from transformers.modeling_outputs import ( |
|
BackboneOutput, |
|
BaseModelOutput, |
|
BaseModelOutputWithPooling, |
|
ImageClassifierOutput, |
|
) |
|
from transformers.modeling_utils import PreTrainedModel |
|
from transformers.pytorch_utils import ( |
|
find_pruneable_heads_and_indices, |
|
prune_linear_layer, |
|
) |
|
from transformers.utils import ( |
|
add_code_sample_docstrings, |
|
add_start_docstrings, |
|
add_start_docstrings_to_model_forward, |
|
logging, |
|
replace_return_docstrings, |
|
) |
|
from transformers.utils.backbone_utils import BackboneMixin |
|
from .configuration_dinov2 import Dinov2Config |
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
_CONFIG_FOR_DOC = "Dinov2Config" |
|
|
|
|
|
_CHECKPOINT_FOR_DOC = "facebook/dinov2-base" |
|
_EXPECTED_OUTPUT_SHAPE = [1, 257, 768] |
|
|
|
|
|
_IMAGE_CLASS_CHECKPOINT = "facebook/dinov2-small-imagenet1k-1-layer" |
|
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" |
|
|
|
|
|
class Dinov2EmbeddingsWithRegisters(nn.Module): |
|
""" |
|
    Construct the CLS token, mask token, register tokens, position and patch embeddings.
|
""" |
|
|
|
def __init__(self, config: Dinov2Config) -> None: |
|
super().__init__() |
|
|
|
self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size)) |
|
self.mask_token = nn.Parameter(torch.zeros(1, config.hidden_size)) |
|
self.patch_embeddings = Dinov2PatchEmbeddings(config) |
|
num_patches = self.patch_embeddings.num_patches |
|
self.position_embeddings = nn.Parameter( |
|
torch.randn(1, num_patches + 1, config.hidden_size) |
|
) |
|
self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
self.config = config |
|
|
|
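        # optional learned register tokens, cf. "Vision Transformers Need Registers" (Darcet et al., 2023)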
if config.num_register_tokens > 0: |
|
self.register_tokens = nn.Parameter( |
|
torch.randn(1, config.num_register_tokens, config.hidden_size) |
|
) |
|
else: |
|
self.register_tokens = None |
|
|
|
def interpolate_pos_encoding( |
|
self, embeddings: torch.Tensor, height: int, width: int |
|
) -> torch.Tensor: |
|
""" |
|
        This method interpolates the pre-trained position encodings so that the model can be used on
        higher-resolution images.
|
|
|
Source: |
|
https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 |
|
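        For example, position embeddings learned on a 16x16 patch grid (a 224x224 image at patch size 14) can be
        resized bicubically to a 37x37 grid for 518x518 inputs.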
""" |
|
|
|
num_patches = embeddings.shape[1] - 1 |
|
num_positions = self.position_embeddings.shape[1] - 1 |
|
if num_patches == num_positions and height == width: |
|
return self.position_embeddings |
|
class_pos_embed = self.position_embeddings[:, 0] |
|
patch_pos_embed = self.position_embeddings[:, 1:] |
|
dim = embeddings.shape[-1] |
|
height = height // self.config.patch_size |
|
width = width // self.config.patch_size |
|
|
|
|
|
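        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8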
height, width = height + 0.1, width + 0.1 |
|
patch_pos_embed = patch_pos_embed.reshape( |
|
1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim |
|
) |
|
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) |
|
target_dtype = patch_pos_embed.dtype |
|
patch_pos_embed = nn.functional.interpolate( |
|
patch_pos_embed.to(dtype=torch.float32), |
|
scale_factor=( |
|
float(height / math.sqrt(num_positions)), |
|
float(width / math.sqrt(num_positions)), |
|
), |
|
mode="bicubic", |
|
align_corners=False, |
|
).to(dtype=target_dtype) |
|
if ( |
|
int(height) != patch_pos_embed.shape[-2] |
|
or int(width) != patch_pos_embed.shape[-1] |
|
): |
|
raise ValueError( |
|
"Width or height does not match with the interpolated position embeddings" |
|
) |
|
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) |
|
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) |
|
|
|
def forward( |
|
self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None |
|
) -> torch.Tensor: |
|
batch_size, _, height, width = pixel_values.shape |
|
target_dtype = self.patch_embeddings.projection.weight.dtype |
|
embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype)) |
|
|
|
if bool_masked_pos is not None: |
|
embeddings = torch.where( |
|
bool_masked_pos.unsqueeze(-1), |
|
self.mask_token.to(embeddings.dtype).unsqueeze(0), |
|
embeddings, |
|
) |
|
|
|
|
|
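        # add the [CLS] token to the embedded patch tokens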
cls_tokens = self.cls_token.expand(batch_size, -1, -1) |
|
embeddings = torch.cat((cls_tokens, embeddings), dim=1) |
|
|
|
|
|
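        # add positional encoding to each token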
embeddings = embeddings + self.interpolate_pos_encoding( |
|
embeddings, height, width |
|
) |
|
|
|
|
|
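        # insert the register tokens between the [CLS] token and the patch tokens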
if self.register_tokens is not None: |
|
embeddings = torch.cat( |
|
( |
|
embeddings[:, :1], |
|
self.register_tokens.expand(embeddings.shape[0], -1, -1), |
|
embeddings[:, 1:], |
|
), |
|
dim=1, |
|
) |
|
|
|
embeddings = self.dropout(embeddings) |
|
|
|
return embeddings |
|
|
|
|
|
class Dinov2PatchEmbeddings(nn.Module): |
|
""" |
|
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial |
|
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a |
|
Transformer. |
|
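    For example, 224x224 pixel values with a patch size of 14 yield a 16x16 grid of patches, i.e. a seq_length of 256.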
""" |
|
|
|
def __init__(self, config): |
|
super().__init__() |
|
image_size, patch_size = config.image_size, config.patch_size |
|
num_channels, hidden_size = config.num_channels, config.hidden_size |
|
|
|
image_size = ( |
|
image_size |
|
if isinstance(image_size, collections.abc.Iterable) |
|
else (image_size, image_size) |
|
) |
|
patch_size = ( |
|
patch_size |
|
if isinstance(patch_size, collections.abc.Iterable) |
|
else (patch_size, patch_size) |
|
) |
|
num_patches = (image_size[1] // patch_size[1]) * ( |
|
image_size[0] // patch_size[0] |
|
) |
|
self.image_size = image_size |
|
self.patch_size = patch_size |
|
self.num_channels = num_channels |
|
self.num_patches = num_patches |
|
|
|
self.projection = nn.Conv2d( |
|
num_channels, hidden_size, kernel_size=patch_size, stride=patch_size |
|
) |
|
|
|
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: |
|
num_channels = pixel_values.shape[1] |
|
if num_channels != self.num_channels: |
|
raise ValueError( |
|
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." |
|
f" Expected {self.num_channels} but got {num_channels}." |
|
) |
|
embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) |
|
return embeddings |
|
|
|
|
|
|
|
class Dinov2SelfAttention(nn.Module): |
|
def __init__(self, config: Dinov2Config) -> None: |
|
super().__init__() |
|
if config.hidden_size % config.num_attention_heads != 0 and not hasattr( |
|
config, "embedding_size" |
|
): |
|
raise ValueError( |
|
f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " |
|
f"heads {config.num_attention_heads}." |
|
) |
|
|
|
self.num_attention_heads = config.num_attention_heads |
|
self.attention_head_size = int(config.hidden_size / config.num_attention_heads) |
|
self.all_head_size = self.num_attention_heads * self.attention_head_size |
|
|
|
self.query = nn.Linear( |
|
config.hidden_size, self.all_head_size, bias=config.qkv_bias |
|
) |
|
self.key = nn.Linear( |
|
config.hidden_size, self.all_head_size, bias=config.qkv_bias |
|
) |
|
self.value = nn.Linear( |
|
config.hidden_size, self.all_head_size, bias=config.qkv_bias |
|
) |
|
|
|
self.dropout = nn.Dropout(config.attention_probs_dropout_prob) |
|
|
|
def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: |
|
new_x_shape = x.size()[:-1] + ( |
|
self.num_attention_heads, |
|
self.attention_head_size, |
|
) |
|
x = x.view(new_x_shape) |
|
return x.permute(0, 2, 1, 3) |
|
|
|
def forward( |
|
self, |
|
hidden_states, |
|
head_mask: Optional[torch.Tensor] = None, |
|
output_attentions: bool = False, |
|
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: |
|
mixed_query_layer = self.query(hidden_states) |
|
|
|
key_layer = self.transpose_for_scores(self.key(hidden_states)) |
|
value_layer = self.transpose_for_scores(self.value(hidden_states)) |
|
query_layer = self.transpose_for_scores(mixed_query_layer) |
|
|
|
|
|
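        # Take the dot product between "query" and "key" to get the raw attention scores.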
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) |
|
|
|
attention_scores = attention_scores / math.sqrt(self.attention_head_size) |
|
|
|
|
|
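        # Normalize the attention scores to probabilities.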
attention_probs = nn.functional.softmax(attention_scores, dim=-1) |
|
|
|
|
|
|
|
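        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.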
attention_probs = self.dropout(attention_probs) |
|
|
|
|
|
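        # Mask heads if we want to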
if head_mask is not None: |
|
attention_probs = attention_probs * head_mask |
|
|
|
context_layer = torch.matmul(attention_probs, value_layer) |
|
|
|
context_layer = context_layer.permute(0, 2, 1, 3).contiguous() |
|
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) |
|
context_layer = context_layer.view(new_context_layer_shape) |
|
|
|
outputs = ( |
|
(context_layer, attention_probs) if output_attentions else (context_layer,) |
|
) |
|
|
|
return outputs |
|
|
|
|
|
|
|
class Dinov2SelfOutput(nn.Module): |
|
""" |
|
The residual connection is defined in Dinov2Layer instead of here (as is the case with other models), due to the |
|
layernorm applied before each block. |
|
""" |
|
|
|
def __init__(self, config: Dinov2Config) -> None: |
|
super().__init__() |
|
self.dense = nn.Linear(config.hidden_size, config.hidden_size) |
|
self.dropout = nn.Dropout(config.hidden_dropout_prob) |
|
|
|
def forward( |
|
self, hidden_states: torch.Tensor, input_tensor: torch.Tensor |
|
) -> torch.Tensor: |
|
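        # `input_tensor` is intentionally unused; the residual connection is applied in Dinov2Layer instead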
hidden_states = self.dense(hidden_states) |
|
hidden_states = self.dropout(hidden_states) |
|
|
|
return hidden_states |
|
|
|
|
|
|
|
class Dinov2Attention(nn.Module): |
|
def __init__(self, config: Dinov2Config) -> None: |
|
super().__init__() |
|
self.attention = Dinov2SelfAttention(config) |
|
self.output = Dinov2SelfOutput(config) |
|
self.pruned_heads = set() |
|
|
|
def prune_heads(self, heads: Set[int]) -> None: |
|
if len(heads) == 0: |
|
return |
|
heads, index = find_pruneable_heads_and_indices( |
|
heads, |
|
self.attention.num_attention_heads, |
|
self.attention.attention_head_size, |
|
self.pruned_heads, |
|
) |
|
|
|
|
|
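        # Prune linear layers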
self.attention.query = prune_linear_layer(self.attention.query, index) |
|
self.attention.key = prune_linear_layer(self.attention.key, index) |
|
self.attention.value = prune_linear_layer(self.attention.value, index) |
|
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) |
|
|
|
|
|
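        # Update hyper params and store pruned heads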
self.attention.num_attention_heads = self.attention.num_attention_heads - len( |
|
heads |
|
) |
|
self.attention.all_head_size = ( |
|
self.attention.attention_head_size * self.attention.num_attention_heads |
|
) |
|
self.pruned_heads = self.pruned_heads.union(heads) |
|
|
|
def forward( |
|
self, |
|
hidden_states: torch.Tensor, |
|
head_mask: Optional[torch.Tensor] = None, |
|
output_attentions: bool = False, |
|
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: |
|
self_outputs = self.attention(hidden_states, head_mask, output_attentions) |
|
|
|
attention_output = self.output(self_outputs[0], hidden_states) |
|
|
|
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
|
return outputs |
|
|
|
|
|
class Dinov2LayerScale(nn.Module): |
|
def __init__(self, config) -> None: |
|
super().__init__() |
|
self.lambda1 = nn.Parameter( |
|
config.layerscale_value * torch.ones(config.hidden_size) |
|
) |
|
|
|
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: |
|
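        # scale the residual branch channel-wise (LayerScale, from "Going Deeper with Image Transformers")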
return hidden_state * self.lambda1 |
|
|
|
|
|
|
|
def drop_path( |
|
input: torch.Tensor, drop_prob: float = 0.0, training: bool = False |
|
) -> torch.Tensor: |
|
""" |
|
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). |
|
|
|
Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, |
|
however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... |
|
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the |
|
layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the |
|
argument. |
|
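    For example, with `drop_prob=0.1` each sample's residual branch is zeroed with probability 0.1 during training,
    and surviving samples are scaled by 1 / 0.9 so that the expected value is unchanged.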
""" |
|
if drop_prob == 0.0 or not training: |
|
return input |
|
keep_prob = 1 - drop_prob |
|
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
|
random_tensor = keep_prob + torch.rand( |
|
shape, dtype=input.dtype, device=input.device |
|
) |
|
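    # binarize into a 0/1 keep mask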
random_tensor.floor_() |
|
output = input.div(keep_prob) * random_tensor |
|
return output |
|
|
|
|
|
|
|
class Dinov2DropPath(nn.Module): |
|
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" |
|
|
|
def __init__(self, drop_prob: Optional[float] = None) -> None: |
|
super().__init__() |
|
self.drop_prob = drop_prob |
|
|
|
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: |
|
return drop_path(hidden_states, self.drop_prob, self.training) |
|
|
|
def extra_repr(self) -> str: |
|
return "p={}".format(self.drop_prob) |
|
|
|
|
|
class Dinov2MLP(nn.Module): |
|
def __init__(self, config) -> None: |
|
super().__init__() |
|
in_features = out_features = config.hidden_size |
|
hidden_features = int(config.hidden_size * config.mlp_ratio) |
|
self.fc1 = nn.Linear(in_features, hidden_features, bias=True) |
|
if isinstance(config.hidden_act, str): |
|
self.activation = ACT2FN[config.hidden_act] |
|
else: |
|
self.activation = config.hidden_act |
|
self.fc2 = nn.Linear(hidden_features, out_features, bias=True) |
|
|
|
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: |
|
hidden_state = self.fc1(hidden_state) |
|
hidden_state = self.activation(hidden_state) |
|
hidden_state = self.fc2(hidden_state) |
|
return hidden_state |
|
|
|
|
|
class Dinov2SwiGLUFFN(nn.Module): |
|
def __init__(self, config) -> None: |
|
super().__init__() |
|
in_features = out_features = config.hidden_size |
|
hidden_features = int(config.hidden_size * config.mlp_ratio) |
|
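        # scale down by 2/3 to offset the SwiGLU gate's second projection, then round up to a multiple of 8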
hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 |
|
|
|
self.weights_in = nn.Linear(in_features, 2 * hidden_features, bias=True) |
|
self.weights_out = nn.Linear(hidden_features, out_features, bias=True) |
|
|
|
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor: |
|
hidden_state = self.weights_in(hidden_state) |
|
x1, x2 = hidden_state.chunk(2, dim=-1) |
|
hidden = nn.functional.silu(x1) * x2 |
|
return self.weights_out(hidden) |
|
|
|
|
|
class Dinov2Layer(nn.Module): |
|
"""This corresponds to the Block class in the original implementation.""" |
|
|
|
def __init__(self, config: Dinov2Config) -> None: |
|
super().__init__() |
|
|
|
self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
self.attention = Dinov2Attention(config) |
|
self.layer_scale1 = Dinov2LayerScale(config) |
|
self.drop_path = ( |
|
Dinov2DropPath(config.drop_path_rate) |
|
if config.drop_path_rate > 0.0 |
|
else nn.Identity() |
|
) |
|
|
|
self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
|
|
if config.use_swiglu_ffn: |
|
self.mlp = Dinov2SwiGLUFFN(config) |
|
else: |
|
self.mlp = Dinov2MLP(config) |
|
self.layer_scale2 = Dinov2LayerScale(config) |
|
|
|
def forward( |
|
self, |
|
hidden_states: torch.Tensor, |
|
head_mask: Optional[torch.Tensor] = None, |
|
output_attentions: bool = False, |
|
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: |
|
        self_attention_outputs = self.attention(
            self.norm1(hidden_states),  # in Dinov2, layernorm is applied before self-attention
            head_mask,
            output_attentions=output_attentions,
        )
|
attention_output = self_attention_outputs[0] |
|
|
|
attention_output = self.layer_scale1(attention_output) |
|
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
|
|
|
|
|
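        # first residual connection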
hidden_states = self.drop_path(attention_output) + hidden_states |
|
|
|
|
|
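        # in Dinov2, layernorm is also applied after self-attention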
layer_output = self.norm2(hidden_states) |
|
layer_output = self.mlp(layer_output) |
|
layer_output = self.layer_scale2(layer_output) |
|
|
|
|
|
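        # second residual connection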
layer_output = self.drop_path(layer_output) + hidden_states |
|
|
|
outputs = (layer_output,) + outputs |
|
|
|
return outputs |
|
|
|
|
|
|
|
class Dinov2Encoder(nn.Module): |
|
def __init__(self, config: Dinov2Config) -> None: |
|
super().__init__() |
|
self.config = config |
|
self.layer = nn.ModuleList( |
|
[Dinov2Layer(config) for _ in range(config.num_hidden_layers)] |
|
) |
|
self.gradient_checkpointing = False |
|
|
|
def forward( |
|
self, |
|
hidden_states: torch.Tensor, |
|
head_mask: Optional[torch.Tensor] = None, |
|
output_attentions: bool = False, |
|
output_hidden_states: bool = False, |
|
return_dict: bool = True, |
|
) -> Union[tuple, BaseModelOutput]: |
|
all_hidden_states = () if output_hidden_states else None |
|
all_self_attentions = () if output_attentions else None |
|
|
|
for i, layer_module in enumerate(self.layer): |
|
if output_hidden_states: |
|
all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
|
layer_head_mask = head_mask[i] if head_mask is not None else None |
|
|
|
if self.gradient_checkpointing and self.training: |
|
layer_outputs = self._gradient_checkpointing_func( |
|
layer_module.__call__, |
|
hidden_states, |
|
layer_head_mask, |
|
output_attentions, |
|
) |
|
else: |
|
layer_outputs = layer_module( |
|
hidden_states, layer_head_mask, output_attentions |
|
) |
|
|
|
hidden_states = layer_outputs[0] |
|
|
|
if output_attentions: |
|
all_self_attentions = all_self_attentions + (layer_outputs[1],) |
|
|
|
if output_hidden_states: |
|
all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
|
if not return_dict: |
|
return tuple( |
|
v |
|
for v in [hidden_states, all_hidden_states, all_self_attentions] |
|
if v is not None |
|
) |
|
return BaseModelOutput( |
|
last_hidden_state=hidden_states, |
|
hidden_states=all_hidden_states, |
|
attentions=all_self_attentions, |
|
) |
|
|
|
|
|
class Dinov2PreTrainedModel(PreTrainedModel): |
|
""" |
|
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained |
|
models. |
|
""" |
|
|
|
config_class = Dinov2Config |
|
base_model_prefix = "dinov2" |
|
main_input_name = "pixel_values" |
|
supports_gradient_checkpointing = True |
|
_no_split_modules = ["Dinov2SwiGLUFFN"] |
|
|
|
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: |
|
"""Initialize the weights""" |
|
if isinstance(module, (nn.Linear, nn.Conv2d)): |
|
|
|
|
|
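            # Upcast in `fp32`, then cast back to the desired `dtype`, to avoid
            # `trunc_normal_` not being implemented for `half` tensors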
module.weight.data = nn.init.trunc_normal_( |
|
module.weight.data.to(torch.float32), |
|
mean=0.0, |
|
std=self.config.initializer_range, |
|
).to(module.weight.dtype) |
|
if module.bias is not None: |
|
module.bias.data.zero_() |
|
elif isinstance(module, nn.LayerNorm): |
|
module.bias.data.zero_() |
|
module.weight.data.fill_(1.0) |
|
elif isinstance(module, Dinov2EmbeddingsWithRegisters): |
|
module.position_embeddings.data = nn.init.trunc_normal_( |
|
module.position_embeddings.data.to(torch.float32), |
|
mean=0.0, |
|
std=self.config.initializer_range, |
|
).to(module.position_embeddings.dtype) |
|
|
|
module.cls_token.data = nn.init.trunc_normal_( |
|
module.cls_token.data.to(torch.float32), |
|
mean=0.0, |
|
std=self.config.initializer_range, |
|
).to(module.cls_token.dtype) |
|
if module.register_tokens is not None: |
|
module.register_tokens.data = nn.init.trunc_normal_( |
|
module.register_tokens.data.to(torch.float32), |
|
mean=0.0, |
|
std=self.config.initializer_range, |
|
).to(module.register_tokens.dtype) |
|
|
|
|
|
DINOV2_START_DOCSTRING = r""" |
|
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it |
|
    as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
|
behavior. |
|
|
|
Parameters: |
|
config ([`Dinov2Config`]): Model configuration class with all the parameters of the model. |
|
Initializing with a config file does not load the weights associated with the model, only the |
|
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. |
|
""" |
|
|
|
DINOV2_BASE_INPUTS_DOCSTRING = r""" |
|
Args: |
|
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): |
|
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See |
|
[`BitImageProcessor.preprocess`] for details. |
|
|
|
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`): |
|
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for |
|
pre-training. |
|
|
|
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): |
|
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: |
|
|
|
- 1 indicates the head is **not masked**, |
|
- 0 indicates the head is **masked**. |
|
|
|
output_attentions (`bool`, *optional*): |
|
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
|
tensors for more detail. |
|
output_hidden_states (`bool`, *optional*): |
|
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
|
more detail. |
|
return_dict (`bool`, *optional*): |
|
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
|
""" |
|
|
|
DINOV2_INPUTS_DOCSTRING = r""" |
|
Args: |
|
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): |
|
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See |
|
[`BitImageProcessor.preprocess`] for details. |
|
|
|
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): |
|
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: |
|
|
|
- 1 indicates the head is **not masked**, |
|
- 0 indicates the head is **masked**. |
|
|
|
output_attentions (`bool`, *optional*): |
|
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
|
tensors for more detail. |
|
output_hidden_states (`bool`, *optional*): |
|
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
|
more detail. |
|
return_dict (`bool`, *optional*): |
|
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
|
""" |
|
|
|
|
|
@add_start_docstrings( |
|
"The bare DINOv2 Model transformer outputting raw hidden-states without any specific head on top.", |
|
DINOV2_START_DOCSTRING, |
|
) |
|
class Dinov2ModelWithRegisters(Dinov2PreTrainedModel): |
|
def __init__(self, config: Dinov2Config): |
|
super().__init__(config) |
|
self.config = config |
|
|
|
self.embeddings = Dinov2EmbeddingsWithRegisters(config) |
|
self.encoder = Dinov2Encoder(config) |
|
|
|
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
|
|
|
|
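        # Initialize weights and apply final processing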
self.post_init() |
|
|
|
def get_input_embeddings(self) -> Dinov2PatchEmbeddings: |
|
return self.embeddings.patch_embeddings |
|
|
|
def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: |
|
""" |
|
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
        base class `PreTrainedModel`.
|
""" |
|
for layer, heads in heads_to_prune.items(): |
|
self.encoder.layer[layer].attention.prune_heads(heads) |
|
|
|
@add_start_docstrings_to_model_forward(DINOV2_BASE_INPUTS_DOCSTRING) |
|
@add_code_sample_docstrings( |
|
checkpoint=_CHECKPOINT_FOR_DOC, |
|
output_type=BaseModelOutputWithPooling, |
|
config_class=_CONFIG_FOR_DOC, |
|
modality="vision", |
|
expected_output=_EXPECTED_OUTPUT_SHAPE, |
|
) |
|
def forward( |
|
self, |
|
pixel_values: Optional[torch.Tensor] = None, |
|
bool_masked_pos: Optional[torch.Tensor] = None, |
|
head_mask: Optional[torch.Tensor] = None, |
|
output_attentions: Optional[bool] = None, |
|
output_hidden_states: Optional[bool] = None, |
|
return_dict: Optional[bool] = None, |
|
) -> Union[Tuple, BaseModelOutputWithPooling]: |
|
output_attentions = ( |
|
output_attentions |
|
if output_attentions is not None |
|
else self.config.output_attentions |
|
) |
|
output_hidden_states = ( |
|
output_hidden_states |
|
if output_hidden_states is not None |
|
else self.config.output_hidden_states |
|
) |
|
return_dict = ( |
|
return_dict if return_dict is not None else self.config.use_return_dict |
|
) |
|
|
|
if pixel_values is None: |
|
raise ValueError("You have to specify pixel_values") |
|
|
|
|
|
|
|
|
|
|
|
|
|
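        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x num_heads x seq_length x seq_length]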
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) |
|
|
|
embedding_output = self.embeddings( |
|
pixel_values, bool_masked_pos=bool_masked_pos |
|
) |
|
|
|
encoder_outputs = self.encoder( |
|
embedding_output, |
|
head_mask=head_mask, |
|
output_attentions=output_attentions, |
|
output_hidden_states=output_hidden_states, |
|
return_dict=return_dict, |
|
) |
|
sequence_output = encoder_outputs[0] |
|
sequence_output = self.layernorm(sequence_output) |
|
pooled_output = sequence_output[:, 0, :] |
|
|
|
if not return_dict: |
|
head_outputs = (sequence_output, pooled_output) |
|
return head_outputs + encoder_outputs[1:] |
|
|
|
return BaseModelOutputWithPooling( |
|
last_hidden_state=sequence_output, |
|
pooler_output=pooled_output, |
|
hidden_states=encoder_outputs.hidden_states, |
|
attentions=encoder_outputs.attentions, |
|
) |
|
|
|
|
|
@add_start_docstrings( |
|
""" |
|
Dinov2 Model transformer with an image classification head on top (a linear layer on top of the final hidden state |
|
of the [CLS] token) e.g. for ImageNet. |
|
""", |
|
DINOV2_START_DOCSTRING, |
|
) |
|
class Dinov2ForImageClassification(Dinov2PreTrainedModel): |
|
def __init__(self, config: Dinov2Config) -> None: |
|
super().__init__(config) |
|
|
|
self.num_labels = config.num_labels |
|
self.dinov2 = Dinov2ModelWithRegisters(config) |
|
|
|
|
|
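        # Classifier head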
self.classifier = ( |
|
nn.Linear(config.hidden_size * 2, config.num_labels) |
|
if config.num_labels > 0 |
|
else nn.Identity() |
|
) |
|
|
|
|
|
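        # Initialize weights and apply final processing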
self.post_init() |
|
|
|
@add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING) |
|
@add_code_sample_docstrings( |
|
checkpoint=_IMAGE_CLASS_CHECKPOINT, |
|
output_type=ImageClassifierOutput, |
|
config_class=_CONFIG_FOR_DOC, |
|
expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, |
|
) |
|
def forward( |
|
self, |
|
pixel_values: Optional[torch.Tensor] = None, |
|
head_mask: Optional[torch.Tensor] = None, |
|
labels: Optional[torch.Tensor] = None, |
|
output_attentions: Optional[bool] = None, |
|
output_hidden_states: Optional[bool] = None, |
|
return_dict: Optional[bool] = None, |
|
) -> Union[tuple, ImageClassifierOutput]: |
|
r""" |
|
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
|
Labels for computing the image classification/regression loss. Indices should be in `[0, ..., |
|
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
|
`config.num_labels > 1` a classification loss is computed (Cross-Entropy). |
|
""" |
|
return_dict = ( |
|
return_dict if return_dict is not None else self.config.use_return_dict |
|
) |
|
|
|
outputs = self.dinov2( |
|
pixel_values, |
|
head_mask=head_mask, |
|
output_attentions=output_attentions, |
|
output_hidden_states=output_hidden_states, |
|
return_dict=return_dict, |
|
) |
|
|
|
sequence_output = outputs[0] |
|
|
|
cls_token = sequence_output[:, 0] |
|
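        # note: when register tokens are enabled they are included in this slice and thus in the mean below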
patch_tokens = sequence_output[:, 1:] |
|
|
|
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1) |
|
|
|
logits = self.classifier(linear_input) |
|
|
|
loss = None |
|
if labels is not None: |
|
|
|
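            # move labels to correct device to enable model parallelism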
labels = labels.to(logits.device) |
|
if self.config.problem_type is None: |
|
if self.num_labels == 1: |
|
self.config.problem_type = "regression" |
|
elif self.num_labels > 1 and ( |
|
labels.dtype == torch.long or labels.dtype == torch.int |
|
): |
|
self.config.problem_type = "single_label_classification" |
|
else: |
|
self.config.problem_type = "multi_label_classification" |
|
|
|
if self.config.problem_type == "regression": |
|
loss_fct = MSELoss() |
|
if self.num_labels == 1: |
|
loss = loss_fct(logits.squeeze(), labels.squeeze()) |
|
else: |
|
loss = loss_fct(logits, labels) |
|
elif self.config.problem_type == "single_label_classification": |
|
loss_fct = CrossEntropyLoss() |
|
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) |
|
elif self.config.problem_type == "multi_label_classification": |
|
loss_fct = BCEWithLogitsLoss() |
|
loss = loss_fct(logits, labels) |
|
|
|
if not return_dict: |
|
output = (logits,) + outputs[2:] |
|
return ((loss,) + output) if loss is not None else output |
|
|
|
return ImageClassifierOutput( |
|
loss=loss, |
|
logits=logits, |
|
hidden_states=outputs.hidden_states, |
|
attentions=outputs.attentions, |
|
) |
|
|
|
|
|
@add_start_docstrings( |
|
""" |
|
Dinov2 backbone, to be used with frameworks like DETR and MaskFormer. |
|
""", |
|
DINOV2_START_DOCSTRING, |
|
) |
|
class Dinov2Backbone(Dinov2PreTrainedModel, BackboneMixin): |
|
def __init__(self, config): |
|
super().__init__(config) |
|
super()._init_backbone(config) |
|
|
|
self.num_features = [ |
|
config.hidden_size for _ in range(config.num_hidden_layers + 1) |
|
] |
|
self.embeddings = Dinov2EmbeddingsWithRegisters(config) |
|
self.encoder = Dinov2Encoder(config) |
|
|
|
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
|
|
|
|
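        # Initialize weights and apply final processing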
self.post_init() |
|
|
|
def get_input_embeddings(self) -> Dinov2PatchEmbeddings: |
|
return self.embeddings.patch_embeddings |
|
|
|
@add_start_docstrings_to_model_forward(DINOV2_INPUTS_DOCSTRING) |
|
@replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC) |
|
def forward( |
|
self, |
|
pixel_values: torch.Tensor, |
|
output_hidden_states: Optional[bool] = None, |
|
output_attentions: Optional[bool] = None, |
|
return_dict: Optional[bool] = None, |
|
) -> BackboneOutput: |
|
""" |
|
Returns: |
|
|
|
Examples: |
|
|
|
```python |
|
>>> from transformers import AutoImageProcessor, AutoBackbone |
|
>>> import torch |
|
>>> from PIL import Image |
|
>>> import requests |
|
|
|
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" |
|
>>> image = Image.open(requests.get(url, stream=True).raw) |
|
|
|
>>> processor = AutoImageProcessor.from_pretrained("facebook/dinov2-base") |
|
>>> model = AutoBackbone.from_pretrained( |
|
... "facebook/dinov2-base", out_features=["stage2", "stage5", "stage8", "stage11"] |
|
... ) |
|
|
|
>>> inputs = processor(image, return_tensors="pt") |
|
|
|
>>> outputs = model(**inputs) |
|
>>> feature_maps = outputs.feature_maps |
|
>>> list(feature_maps[-1].shape) |
|
[1, 768, 16, 16] |
|
```""" |
|
return_dict = ( |
|
return_dict if return_dict is not None else self.config.use_return_dict |
|
) |
|
output_hidden_states = ( |
|
output_hidden_states |
|
if output_hidden_states is not None |
|
else self.config.output_hidden_states |
|
) |
|
output_attentions = ( |
|
output_attentions |
|
if output_attentions is not None |
|
else self.config.output_attentions |
|
) |
|
|
|
embedding_output = self.embeddings(pixel_values) |
|
|
|
outputs = self.encoder( |
|
embedding_output, |
|
output_hidden_states=True, |
|
output_attentions=output_attentions, |
|
return_dict=return_dict, |
|
) |
|
|
|
hidden_states = outputs.hidden_states if return_dict else outputs[1] |
|
|
|
feature_maps = () |
|
for stage, hidden_state in zip(self.stage_names, hidden_states): |
|
if stage in self.out_features: |
|
if self.config.apply_layernorm: |
|
hidden_state = self.layernorm(hidden_state) |
|
if self.config.reshape_hidden_states: |
|
                    hidden_state = hidden_state[:, 1 + self.config.num_register_tokens :]  # drop [CLS] and register tokens
|
|
|
|
|
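                    # this was actually a bug in the original implementation that we copied here,
                    # cf https://github.com/facebookresearch/dinov2/pull/2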
batch_size, _, height, width = pixel_values.shape |
|
patch_size = self.config.patch_size |
|
hidden_state = hidden_state.reshape( |
|
batch_size, height // patch_size, width // patch_size, -1 |
|
) |
|
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous() |
|
feature_maps += (hidden_state,) |
|
|
|
if not return_dict: |
|
if output_hidden_states: |
|
output = (feature_maps,) + outputs[1:] |
|
else: |
|
output = (feature_maps,) + outputs[2:] |
|
return output |
|
|
|
return BackboneOutput( |
|
feature_maps=feature_maps, |
|
hidden_states=outputs.hidden_states if output_hidden_states else None, |
|
attentions=outputs.attentions if output_attentions else None, |
|
) |
|
|