""" GPT-J model with MoE. """

import warnings
from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn

from transformers.modeling_outputs import (
    MoeCausalLMOutputWithPast,
    MoeModelOutputWithPast,
)
from transformers.models.gptj.modeling_gptj import (
    GPTJ_ATTENTION_CLASSES,
    GPTJMLP,
    GPTJPreTrainedModel,
)
from transformers.utils import logging
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map

from .configuration_gptj_moe import GPTJMoEConfig


logger = logging.get_logger(__name__)

def load_balancing_loss_func(
    gate_logits: Union[torch.Tensor, Tuple[torch.Tensor, ...], None],
    num_experts: Optional[int] = None,
    top_k: int = 2,
    attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
    r"""
    Computes the auxiliary load balancing loss as in Switch Transformer, implemented in PyTorch.

    See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
    function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
    experts is too unbalanced.

    Args:
        gate_logits (Union[`torch.Tensor`, Tuple[`torch.Tensor`]]):
            Logits from the `gate`, should be a tuple of `model.config.num_hidden_layers` tensors of
            shape `[batch_size * sequence_length, num_experts]`.
        num_experts (`int`, *optional*):
            Number of experts.
        top_k (`int`, *optional*, defaults to 2):
            Number of experts each token is routed to.
        attention_mask (`torch.Tensor`, *optional*):
            The attention_mask used in the forward pass, of shape `[batch_size, sequence_length]` if not None.

    Returns:
        The auxiliary loss.
    """
    if gate_logits is None or not isinstance(gate_logits, tuple):
        return 0

    # Gather the per-layer router logits on a single device before concatenating.
    compute_device = gate_logits[0].device
    concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)

    routing_weights = F.softmax(concatenated_gate_logits, dim=-1)

    _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)

    expert_mask = F.one_hot(selected_experts, num_experts)

    if attention_mask is None:
        # Compute the fraction of tokens routed to each expert
        tokens_per_expert = torch.mean(expert_mask.float(), dim=0)

        # Compute the average probability of routing to each expert
        router_prob_per_expert = torch.mean(routing_weights, dim=0)
    else:
        batch_size, sequence_length = attention_mask.shape
        num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)

        # Compute the mask that zeroes out padding tokens, with the same shape as expert_mask
        expert_attention_mask = (
            attention_mask[None, :, :, None, None]
            .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
            .reshape(-1, top_k, num_experts)
            .to(compute_device)
        )

        # Compute the fraction of tokens routed to each expert, ignoring padding tokens
        tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
            expert_attention_mask, dim=0
        )

        # Compute the mask that zeroes out padding tokens, with the same shape as routing_weights
        router_per_expert_attention_mask = (
            attention_mask[None, :, :, None]
            .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
            .reshape(-1, num_experts)
            .to(compute_device)
        )

        # Compute the average routing probability per expert, ignoring padding tokens
        router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
            router_per_expert_attention_mask, dim=0
        )

    overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
    return overall_loss * num_experts

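# A minimal usage sketch (not part of the model): feeding per-layer router logits
# into `load_balancing_loss_func` above. The sizes are arbitrary placeholders, and
# `_load_balancing_loss_example` is illustrative, not an API of this module.
def _load_balancing_loss_example():
    num_layers, batch_size, seq_len, num_experts, top_k = 2, 2, 4, 8, 2
    # One (batch_size * seq_len, num_experts) logits tensor per layer, as the model emits them.
    gate_logits = tuple(torch.randn(batch_size * seq_len, num_experts) for _ in range(num_layers))
    attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)
    # Returns a scalar tensor; the causal LM head scales it by `router_aux_loss_coef`
    # before adding it to the language modeling loss.
    return load_balancing_loss_func(gate_logits, num_experts, top_k, attention_mask)
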
class GPTJSparseMoE(nn.Module):
    """
    This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It is faster
    since it formulates MoE operations in terms of block-sparse operations to accommodate imbalanced assignments of
    tokens to experts, whereas standard MoE either (1) drops tokens at the cost of reduced performance or (2) sets
    the capacity factor to the number of experts and thus wastes computation and memory on padding.
    """

    def __init__(self, config):
        super().__init__()
        self.hidden_dim = config.n_embd
        self.ffn_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
        self.num_experts = config.num_local_experts
        self.top_k = config.num_experts_per_tok

        # gating
        self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)

        self.experts = nn.ModuleList([GPTJMLP(self.ffn_dim, config) for _ in range(self.num_experts)])

        # Jitter parameters
        self.jitter_noise = config.router_jitter_noise

    def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        if self.training and self.jitter_noise > 0:
            hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
        hidden_states = hidden_states.view(-1, hidden_dim)

        # router_logits: (batch_size * sequence_length, num_experts)
        router_logits = self.gate(hidden_states)

        routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
        routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
        routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
        # Cast back to the input dtype
        routing_weights = routing_weights.to(hidden_states.dtype)

        final_hidden_states = torch.zeros(
            (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
        )

        # One-hot encode the selected experts to create an expert mask,
        # used to index which tokens each expert is assigned.
        expert_mask = F.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)

        # Loop over all available experts and perform the computation on each expert
        for expert_idx in range(self.num_experts):
            expert_layer = self.experts[expert_idx]
            idx, top_x = torch.where(expert_mask[expert_idx])

            if top_x.shape[0] == 0:
                continue

            # Index the hidden states assigned to the current expert and weight the
            # expert output by the corresponding routing weights.
            current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
            current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]

            # `index_add_` accumulates the weighted expert outputs back into the
            # flat token dimension, indexed by `top_x`.
            final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
        final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        return final_hidden_states, router_logits

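# A minimal shape-check sketch (not part of the model). It assumes `GPTJMoEConfig`
# accepts the GPT-J and MoE fields read in `GPTJSparseMoE.__init__` as keyword
# arguments; `_sparse_moe_example` itself is illustrative only.
def _sparse_moe_example():
    config = GPTJMoEConfig(
        n_embd=64,
        n_inner=None,  # falls back to 4 * n_embd = 256 for each expert MLP
        num_local_experts=4,
        num_experts_per_tok=2,
        router_jitter_noise=0.0,
    )
    moe = GPTJSparseMoE(config)
    hidden_states = torch.randn(2, 8, config.n_embd)
    output, router_logits = moe(hidden_states)
    # output: (2, 8, 64); router_logits: (16, 4) -- one row per token.
    return output.shape, router_logits.shape
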
class GPTJMoEBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        self.attn = GPTJ_ATTENTION_CLASSES[config._attn_implementation](config)
        self.block_sparse_moe = GPTJSparseMoE(config)

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        output_router_logits: Optional[bool] = False,
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states=hidden_states,
            layer_past=layer_past,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # attn_output, present, (attentions)
        outputs = attn_outputs[1:]

        # As in GPT-J, attention and the (MoE) feed-forward read the same normalized
        # input and are summed with the residual in parallel.
        feed_forward_hidden_states, router_logits = self.block_sparse_moe(hidden_states)
        hidden_states = attn_output + feed_forward_hidden_states + residual

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        if output_router_logits:
            outputs = outputs + (router_logits,)

        return outputs  # hidden_states, (present), (attentions), (router_logits)

class GPTJMoEModel(GPTJPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.n_embd
        self.vocab_size = config.vocab_size
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([GPTJMoEBlock(config) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

        self.post_init()

        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    def parallelize(self, device_map=None):
        warnings.warn(
            "`GPTJMoEModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load"
            " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your"
            " own `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0,"
            " 'h.1': 1, ...}",
            FutureWarning,
        )
        # Check validity of device_map
        self.device_map = (
            get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
        )
        assert_device_map(self.device_map, len(self.h))
        self.model_parallel = True
        self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
        self.last_device = "cuda:" + str(max(self.device_map.keys()))
        self.wte = self.wte.to(self.first_device)

        # Load each block onto its assigned device
        for k, v in self.device_map.items():
            for block in v:
                cuda_device = "cuda:" + str(k)
                self.h[block] = self.h[block].to(cuda_device)

        self.ln_f = self.ln_f.to(self.last_device)

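    # Note: `parallelize` expects `device_map` in the `get_device_map` format,
    # i.e. a dict mapping a CUDA device index to the block indices it hosts.
    # A hypothetical split of a 4-layer model across two GPUs:
    #
    #     model.parallelize({0: [0, 1], 1: [2, 3]})
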
    def deparallelize(self):
        warnings.warn(
            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.model_parallel = False
        self.device_map = None
        self.first_device = "cpu"
        self.last_device = "cpu"
        self.wte = self.wte.to("cpu")
        for index in range(len(self.h)):
            self.h[index] = self.h[index].to("cpu")
        self.ln_f = self.ln_f.to("cpu")
        torch.cuda.empty_cache()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MoeModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_router_logits = (
            output_router_logits if output_router_logits is not None else self.config.output_router_logits
        )
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)

        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0)

        if not self._use_flash_attention_2:
            # Attention mask.
            if attention_mask is not None:
                if batch_size <= 0:
                    raise ValueError("batch_size has to be defined and > 0")
                attention_mask = attention_mask.view(batch_size, -1)
                # We create a 4D attention mask from a 2D tensor mask with sizes
                # [batch_size, 1, 1, to_seq_length] so it can be broadcast to
                # [batch_size, num_heads, from_seq_length, to_seq_length].
                attention_mask = attention_mask[:, None, None, :]

                # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
                # masked positions, this creates a tensor that is 0.0 for positions we want
                # to attend and the dtype's minimum value for masked positions. Since it is
                # added to the raw scores before the softmax, this is effectively the same
                # as removing them entirely.
                attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
                attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # Prepare head mask if needed: 1.0 in head_mask indicates we keep the head.
        # head_mask has shape n_layer x batch x num_attention_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)

        hidden_states = inputs_embeds

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        all_router_logits = () if output_router_logits else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure layer_past is on the same device as hidden_states
                if layer_past is not None:
                    layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
                # Ensure that attention_mask is always on the same device as hidden_states
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                outputs = self._gradient_checkpointing_func(
                    block.__call__,
                    hidden_states,
                    None,
                    attention_mask,
                    position_ids,
                    head_mask[i],
                    use_cache,
                    output_attentions,
                    output_router_logits,
                )
            else:
                outputs = block(
                    hidden_states=hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    head_mask=head_mask[i],
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    output_router_logits=output_router_logits,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)

            if output_router_logits:
                all_router_logits = all_router_logits + (outputs[-1],)

            # Model Parallel: if it is the last layer for that device, move things to the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(output_shape)

        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_router_logits]
                if v is not None
            )

        return MoeModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            router_logits=all_router_logits,
        )

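# A minimal inference sketch (not part of the model), reusing the same hypothetical
# `GPTJMoEConfig` keyword arguments as the earlier examples. With
# `output_router_logits=True`, the output carries one router-logits tensor per layer.
def _moe_model_example():
    config = GPTJMoEConfig(
        vocab_size=128,
        n_positions=64,
        n_embd=64,
        n_layer=2,
        n_head=4,
        rotary_dim=16,  # must not exceed the per-head dimension (64 / 4 = 16)
        num_local_experts=4,
        num_experts_per_tok=2,
        router_jitter_noise=0.0,
    )
    model = GPTJMoEModel(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    with torch.no_grad():
        outputs = model(input_ids=input_ids, output_router_logits=True)
    # outputs.router_logits: tuple of n_layer tensors, each (batch * seq_len, num_experts)
    return outputs.last_hidden_state.shape, len(outputs.router_logits)
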
class GPTJMoEForCausalLM(GPTJPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = GPTJMoEModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)

        self.model_parallel = False
        self.device_map = None

        # MoE routing hyperparameters used for the auxiliary load balancing loss
        self.router_aux_loss_coef = config.router_aux_loss_coef
        self.num_experts = config.num_local_experts
        self.num_experts_per_tok = config.num_experts_per_tok

        self.post_init()

    def parallelize(self, device_map=None):
        warnings.warn(
            "`GPTJMoEForCausalLM.parallelize` is deprecated and will be removed in v5 of Transformers, you should"
            " load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide"
            " your own `device_map` but it needs to be a dictionary module_name to device, so for instance"
            " {'transformer.h.0': 0, 'transformer.h.1': 1, ...}",
            FutureWarning,
        )
        self.device_map = (
            get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.transformer.h))
        self.transformer.parallelize(self.device_map)
        self.lm_head = self.lm_head.to(self.transformer.first_device)
        self.model_parallel = True

    def deparallelize(self):
        warnings.warn(
            "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.",
            FutureWarning,
        )
        self.transformer.deparallelize()
        self.transformer = self.transformer.to("cpu")
        self.lm_head = self.lm_head.to("cpu")
        self.model_parallel = False
        torch.cuda.empty_cache()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, inputs_embeds=None, output_router_logits=False, **kwargs
    ):
        token_type_ids = kwargs.get("token_type_ids", None)
        # Omit tokens covered by past_key_values
        if past_key_values:
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only the final ID
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -input_ids.shape[1] :]

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "position_ids": position_ids,
                "attention_mask": attention_mask,
                "token_type_ids": token_type_ids,
                "output_router_logits": output_router_logits,
            }
        )

        return model_inputs

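    # Illustrative generation call (hypothetical `model` variable): `generate` routes
    # extra keyword arguments through `prepare_inputs_for_generation`, so router logits
    # can be requested during decoding, e.g.:
    #
    #     model.generate(input_ids, max_new_tokens=20, output_router_logits=True)
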
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_router_logits: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to
            `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_router_logits=output_router_logits,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)

        # Make sure sampling in fp16 works correctly and compute the loss in fp32
        lm_logits = self.lm_head(hidden_states).to(torch.float32)

        loss = None
        if labels is not None:
            # move labels to the logits device to enable model parallelism
            labels = labels.to(lm_logits.device)
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

            loss = loss.to(hidden_states.dtype)

        aux_loss = None
        if output_router_logits:
            aux_loss = load_balancing_loss_func(
                transformer_outputs.router_logits if return_dict else transformer_outputs[-1],
                self.num_experts,
                self.num_experts_per_tok,
                attention_mask,
            )
            if labels is not None:
                # make sure the auxiliary loss resides on the same device as the LM loss
                loss += self.router_aux_loss_coef * aux_loss.to(loss.device)

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            if output_router_logits:
                output = (aux_loss,) + output
            return ((loss,) + output) if loss is not None else output

        return MoeCausalLMOutputWithPast(
            loss=loss,
            aux_loss=aux_loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
            router_logits=transformer_outputs.router_logits,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past_key_values
        )

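
# A minimal training-step sketch (not part of the model), reusing the hypothetical
# `GPTJMoEConfig` keyword arguments from the examples above. With `labels` and
# `output_router_logits=True`, the returned `loss` already includes
# `router_aux_loss_coef * aux_loss`.
def _causal_lm_training_example():
    config = GPTJMoEConfig(
        vocab_size=128,
        n_positions=64,
        n_embd=64,
        n_layer=2,
        n_head=4,
        rotary_dim=16,
        num_local_experts=4,
        num_experts_per_tok=2,
        router_jitter_noise=0.0,
        router_aux_loss_coef=0.02,
    )
    model = GPTJMoEForCausalLM(config)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    outputs = model(input_ids=input_ids, labels=input_ids, output_router_logits=True)
    outputs.loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return outputs.loss.detach(), outputs.aux_loss.detach()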