# Copyright 2024 The YourMT3 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Please see the details in the LICENSE file.
from typing import Tuple, Literal, Any, Optional

import math
import torch
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutput

from model.conformer_helper import ConformerYMT3Config, ConformerYMT3PreTrainedModel
from model.positional_encoding import (Wav2Vec2ConformerRelPositionalEmbedding,
                                       Wav2Vec2ConformerRotaryPositionalEmbedding)


class ConformerYMT3FeedForward(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.dropout_rate)
        self.intermediate_dense = nn.Linear(config.d_model, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
        self.output_dense = nn.Linear(config.intermediate_size, config.d_model)
        self.output_dropout = nn.Dropout(config.dropout_rate)

    def forward(self, hidden_states):
        hidden_states = self.intermediate_dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.intermediate_dropout(hidden_states)
        hidden_states = self.output_dense(hidden_states)
        hidden_states = self.output_dropout(hidden_states)
        return hidden_states


class ConformerYMT3ConvolutionModule(nn.Module):
    """Convolution block used in the conformer block"""

    def __init__(self, config):
        super().__init__()
        if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
            raise ValueError("`config.conv_depthwise_kernel_size` should be an odd number for 'SAME' padding")
        self.layer_norm = nn.LayerNorm(config.d_model)
        self.pointwise_conv1 = torch.nn.Conv1d(
            config.d_model,
            2 * config.d_model,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=False,
        )
        self.glu = torch.nn.GLU(dim=1)
        self.depthwise_conv = torch.nn.Conv1d(
            config.d_model,
            config.d_model,
            config.conv_depthwise_kernel_size,
            stride=1,
            padding=(config.conv_depthwise_kernel_size - 1) // 2,
            groups=config.d_model,
            bias=False,
        )
        self.batch_norm = torch.nn.BatchNorm1d(config.d_model)
        self.activation = ACT2FN[config.hidden_act]
        self.pointwise_conv2 = torch.nn.Conv1d(
            config.d_model,
            config.d_model,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=False,
        )
        self.dropout = torch.nn.Dropout(config.dropout_rate)

    def forward(self, hidden_states):
        hidden_states = self.layer_norm(hidden_states)
        # exchange the temporal dimension and the feature dimension
        hidden_states = hidden_states.transpose(1, 2)

        # GLU mechanism
        # => (batch, 2*channel, time)
        hidden_states = self.pointwise_conv1(hidden_states)
        # => (batch, channel, time)
        hidden_states = self.glu(hidden_states)

        # 1D Depthwise Conv
        hidden_states = self.depthwise_conv(hidden_states)
        hidden_states = self.batch_norm(hidden_states)
        hidden_states = self.activation(hidden_states)

        hidden_states = self.pointwise_conv2(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states


class ConformerYMT3SelfAttention(nn.Module):
    """Construct a ConformerSelfAttention object.

    Can be enhanced with rotary or relative position embeddings.
    """

    def __init__(self, config):
        super().__init__()

        self.head_size = config.d_model // config.num_heads
        self.num_heads = config.num_heads
        self.position_encoding_type = config.position_encoding_type

        self.linear_q = nn.Linear(config.d_model, config.d_model)
        self.linear_k = nn.Linear(config.d_model, config.d_model)
        self.linear_v = nn.Linear(config.d_model, config.d_model)
        self.linear_out = nn.Linear(config.d_model, config.d_model)

        self.dropout = nn.Dropout(p=config.dropout_rate)

        if self.position_encoding_type == "relative":
            # linear transformation for positional encoding
            self.linear_pos = nn.Linear(config.d_model, config.d_model, bias=False)
            # these two learnable biases are used in matrix c and matrix d
            # as described in https://arxiv.org/abs/1901.02860 Section 3.3
            self.pos_bias_u = nn.Parameter(torch.zeros(self.num_heads, self.head_size))
            self.pos_bias_v = nn.Parameter(torch.zeros(self.num_heads, self.head_size))

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        relative_position_embeddings: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # self-attention mechanism
        batch_size, sequence_length, d_model = hidden_states.size()

        # make sure query/key states can be != value states
        query_key_states = hidden_states
        value_states = hidden_states

        if self.position_encoding_type == "rotary":
            if relative_position_embeddings is None:
                raise ValueError(
                    "`relative_position_embeddings` has to be defined when `self.position_encoding_type == 'rotary'`")
            query_key_states = self._apply_rotary_embedding(query_key_states, relative_position_embeddings)

        # project query_key_states and value_states
        query = self.linear_q(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
        key = self.linear_k(query_key_states).view(batch_size, -1, self.num_heads, self.head_size)
        value = self.linear_v(value_states).view(batch_size, -1, self.num_heads, self.head_size)

        # => (batch, head, time1, d_k)
        query = query.transpose(1, 2)
        key = key.transpose(1, 2)
        value = value.transpose(1, 2)

        if self.position_encoding_type == "relative":
            if relative_position_embeddings is None:
                raise ValueError("`relative_position_embeddings` has to be defined when "
                                 "`self.position_encoding_type == 'relative'`")
            # apply relative_position_embeddings to qk scores
            # as proposed in Transformer-XL: https://arxiv.org/abs/1901.02860
            scores = self._apply_relative_embeddings(query=query,
                                                     key=key,
                                                     relative_position_embeddings=relative_position_embeddings)
        else:
            scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.head_size)

        # apply attention_mask if necessary
        if attention_mask is not None:
            scores = scores + attention_mask

        # => (batch, head, time1, time2)
        probs = torch.softmax(scores, dim=-1)
        probs = self.dropout(probs)

        # => (batch, head, time1, d_k)
        hidden_states = torch.matmul(probs, value)

        # => (batch, time1, d_model)
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, self.num_heads * self.head_size)
        hidden_states = self.linear_out(hidden_states)

        return hidden_states, probs

    def _apply_rotary_embedding(self, hidden_states, relative_position_embeddings):
        batch_size, sequence_length, d_model = hidden_states.size()
        hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads, self.head_size)

        cos = relative_position_embeddings[0, :sequence_length, ...]
        sin = relative_position_embeddings[1, :sequence_length, ...]

        # rotate hidden_states with rotary embeddings
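        # rotate-half (RoPE, https://arxiv.org/abs/2104.09864): the second half of each head
        # dimension is negated and placed in front of the first half before combining with
        # cos/sin; this assumes head_size is even.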
        hidden_states = hidden_states.transpose(0, 1)
        rotated_states_begin = hidden_states[..., :self.head_size // 2]
        rotated_states_end = hidden_states[..., self.head_size // 2:]
        rotated_states = torch.cat((-rotated_states_end, rotated_states_begin), dim=rotated_states_begin.ndim - 1)
        hidden_states = (hidden_states * cos) + (rotated_states * sin)
        hidden_states = hidden_states.transpose(0, 1)

        hidden_states = hidden_states.view(batch_size, sequence_length, self.num_heads * self.head_size)
        return hidden_states

    def _apply_relative_embeddings(self, query, key, relative_position_embeddings):
        # 1. project positional embeddings
        # => (batch, head, 2*time1-1, d_k)
        proj_relative_position_embeddings = self.linear_pos(relative_position_embeddings)
        proj_relative_position_embeddings = proj_relative_position_embeddings.view(relative_position_embeddings.size(0),
                                                                                   -1, self.num_heads, self.head_size)
        proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(1, 2)
        proj_relative_position_embeddings = proj_relative_position_embeddings.transpose(2, 3)

        # 2. Add bias to query
        # => (batch, head, time1, d_k)
        query = query.transpose(1, 2)
        q_with_bias_u = (query + self.pos_bias_u).transpose(1, 2)
        q_with_bias_v = (query + self.pos_bias_v).transpose(1, 2)

        # 3. attention score: first compute matrix a and matrix c
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        # => (batch, head, time1, time2)
        scores_ac = torch.matmul(q_with_bias_u, key.transpose(-2, -1))

        # 4. then compute matrix b and matrix d
        # => (batch, head, time1, 2*time1-1)
        scores_bd = torch.matmul(q_with_bias_v, proj_relative_position_embeddings)

        # 5. shift matrix b and matrix d
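        # The pad/reshape/slice below is the Transformer-XL "relative shift" trick: prepending a
        # zero column and re-viewing the tensor realigns each query row so the relative offsets
        # line up per position, and the final slice keeps only the time2 = time1 valid offsets.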
        zero_pad = torch.zeros((*scores_bd.size()[:3], 1), device=scores_bd.device, dtype=scores_bd.dtype)
        scores_bd_padded = torch.cat([zero_pad, scores_bd], dim=-1)
        scores_bd_padded_shape = scores_bd.size()[:2] + (scores_bd.shape[3] + 1, scores_bd.shape[2])
        scores_bd_padded = scores_bd_padded.view(*scores_bd_padded_shape)
        scores_bd = scores_bd_padded[:, :, 1:].view_as(scores_bd)
        scores_bd = scores_bd[:, :, :, :scores_bd.size(-1) // 2 + 1]

        # 6. sum matrices
        # => (batch, head, time1, time2)
        scores = (scores_ac + scores_bd) / math.sqrt(self.head_size)

        return scores


class ConformerYMT3EncoderLayer(nn.Module):
    """Conformer block based on https://arxiv.org/abs/2005.08100."""

    def __init__(self, config):
        super().__init__()
        embed_dim = config.d_model
        dropout = config.dropout_rate

        # Feed-forward 1
        self.ffn1_layer_norm = nn.LayerNorm(embed_dim)
        self.ffn1 = ConformerYMT3FeedForward(config)

        # Self-Attention
        self.self_attn_layer_norm = nn.LayerNorm(embed_dim)
        self.self_attn_dropout = torch.nn.Dropout(dropout)
        self.self_attn = ConformerYMT3SelfAttention(config)

        # Conformer Convolution
        self.conv_module = ConformerYMT3ConvolutionModule(config)

        # Feed-forward 2
        self.ffn2_layer_norm = nn.LayerNorm(embed_dim)
        self.ffn2 = ConformerYMT3FeedForward(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim)

    def forward(
        self,
        hidden_states,
        attention_mask: Optional[torch.Tensor] = None,
        relative_position_embeddings: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ):
        # 1. Feed-Forward 1 layer
        residual = hidden_states
        hidden_states = self.ffn1_layer_norm(hidden_states)
        hidden_states = self.ffn1(hidden_states)
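        # the 0.5 factor is the half-step residual weight of the Macaron-style feed-forward
        # modules used in the Conformer paper (https://arxiv.org/abs/2005.08100)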
        hidden_states = hidden_states * 0.5 + residual
        residual = hidden_states

        # 2. Self-Attention layer
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            relative_position_embeddings=relative_position_embeddings,
            output_attentions=output_attentions,
        )
        hidden_states = self.self_attn_dropout(hidden_states)
        hidden_states = hidden_states + residual

        # 3. Convolutional Layer
        residual = hidden_states
        hidden_states = self.conv_module(hidden_states)
        hidden_states = residual + hidden_states

        # 4. Feed-Forward 2 Layer
        residual = hidden_states
        hidden_states = self.ffn2_layer_norm(hidden_states)
        hidden_states = self.ffn2(hidden_states)
        hidden_states = hidden_states * 0.5 + residual
        hidden_states = self.final_layer_norm(hidden_states)

        return hidden_states, attn_weights


class ConformerYMT3Encoder(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config

        if config.position_encoding_type == "relative":
            self.embed_positions = Wav2Vec2ConformerRelPositionalEmbedding(config)
        elif config.position_encoding_type == "rotary":
            self.embed_positions = Wav2Vec2ConformerRotaryPositionalEmbedding(config)
        else:
            self.embed_positions = None
        # self.pos_conv_embed = Wav2Vec2ConformerPositionalConvEmbedding(config)

        self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.dropout_rate)
        self.layers = nn.ModuleList([ConformerYMT3EncoderLayer(config) for _ in range(config.num_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds: torch.FloatTensor,  # (B, T, D)
        attention_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict

        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        # inputs_embeds as hidden_states
        hidden_states = inputs_embeds

        if attention_mask is not None:
            # make sure padded tokens output 0
            hidden_states[~attention_mask.bool()] = 0.0
            # extend attention_mask
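            # convert the 0/1 padding mask into an additive mask: padded positions receive the
            # most negative representable value so they vanish after the attention softmax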
            attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
            attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
            attention_mask = attention_mask.expand(attention_mask.shape[0], 1, attention_mask.shape[-1],
                                                   attention_mask.shape[-1])

        hidden_states = self.dropout(hidden_states)

        if self.embed_positions is not None:
            relative_position_embeddings = self.embed_positions(hidden_states)
        else:
            relative_position_embeddings = None

        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])
            skip_the_layer = self.training and (dropout_probability < self.config.layerdrop)
            if not skip_the_layer:
                # under deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    # create gradient checkpointing function
                    def create_custom_forward(module):

                        def custom_forward(*inputs):
                            return module(*inputs, output_attentions)

                        return custom_forward

                    layer_outputs = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(layer),
                        hidden_states,
                        attention_mask,
                        relative_position_embeddings,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states,
                        attention_mask=attention_mask,
                        relative_position_embeddings=relative_position_embeddings,
                        output_attentions=output_attentions,
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        hidden_states = self.layer_norm(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


def test():
    import torch
    from model.conformer_mod import ConformerYMT3Encoder
    from model.conformer_helper import ConformerYMT3Config
    from model.ops import count_parameters

    config = ConformerYMT3Config()
    encoder = ConformerYMT3Encoder(config)
    encoder.eval()
    # num params: 48,468,992 w/ intermediate_size=2048
    # num params: 23,278,592 w/ intermediate_size=512
    x = torch.randn(2, 256, 512)  # (B, T, D)
    enc_hs = encoder.forward(inputs_embeds=x)['last_hidden_state']  # (B, T, D)
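    # minimal sanity check (illustrative): the encoder should preserve the (B, T, D) shape
    assert enc_hs.shape == x.shape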