import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, functional as F
from transformers import PreTrainedModel, GPT2PreTrainedModel
from .configuration_gpt_optimized import GPTOptimConfig
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions
from transformers.utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.modeling_attn_mask_utils import _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask_for_sdpa
from typing import Optional, Tuple, Union


_CHECKPOINT_FOR_DOC = "openai-community/gpt2"
_CONFIG_FOR_DOC = "GPT2Config"


GPT2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
            `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input
            sequence tokens in the vocabulary.

            If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
            `input_ids`.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
            Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
            `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
            their past given to this model should not be passed as `input_ids` as they have already been computed.
        attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for
            `past_key_values`. In other words, the `attention_mask` always has to have the length:
            `len(past_key_values) + len(input_ids)`

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.

            If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
            `past_key_values`).
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class CausalSelfAttention(nn.Module):

    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, in a single fused linear layer
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd)
        self.c_proj.NANOGPT_SCALE_INIT = 1
        self.n_head = config.n_head
        self.n_embd = config.n_embd

    def forward(self, x):
        B, T, C = x.size()  # batch size, sequence length, embedding dimensionality (n_embd)
        # project to query, key, value for all heads in one matmul, then split
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embd, dim=2)
        # reshape to (B, n_head, T, head_dim)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)
        # fused scaled dot-product attention with an implicit causal mask
        y = F.scaled_dot_product_attention(q, k, v, is_causal=True)
        # re-assemble all head outputs side by side
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        # output projection
        y = self.c_proj(y)
        return y
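

# Illustrative sketch (not part of the original module): the fused
# `F.scaled_dot_product_attention(..., is_causal=True)` call above is expected to match
# an explicit masked-softmax attention. The reference helper below is an assumption
# added for clarity and can serve as a quick numerical sanity check, e.g.
# `torch.allclose(F.scaled_dot_product_attention(q, k, v, is_causal=True),
#                 _reference_causal_attention(q, k, v), atol=1e-6)`.
def _reference_causal_attention(q, k, v):
    # q, k, v: (B, n_head, T, head_dim)
    T = q.size(-2)
    att = (q @ k.transpose(-2, -1)) / (q.size(-1) ** 0.5)
    causal_mask = torch.tril(torch.ones(T, T, dtype=torch.bool, device=q.device))
    att = att.masked_fill(~causal_mask, float('-inf'))
    return F.softmax(att, dim=-1) @ v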


class MLP(nn.Module):

    def __init__(self, config):
        super().__init__()
        # feed-forward block: expand to 4 * n_embd, apply GELU, project back to n_embd
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
        self.gelu = nn.GELU(approximate='tanh')
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
        self.c_proj.NANOGPT_SCALE_INIT = 1

    def forward(self, x):
        x = self.c_fc(x)
        x = self.gelu(x)
        x = self.c_proj(x)
        return x
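

# Note (added for illustration, not part of the original file): `approximate='tanh'`
# mirrors the GELU variant used by the original GPT-2 code; it differs from the exact
# GELU only by a small amount. The hypothetical helper below measures that gap.
def _gelu_tanh_gap():
    # maximum difference between tanh-approximate and exact GELU on [-4, 4]
    x = torch.linspace(-4, 4, steps=1001)
    return (nn.GELU(approximate='tanh')(x) - nn.GELU()(x)).abs().max()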


class Block(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attn = CausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        # pre-norm residual connections: LayerNorm is applied before attention and MLP
        x = x + self.attn(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


class GPT(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config

        self.transformer = nn.ModuleDict(dict(
            wte = nn.Embedding(config.vocab_size, config.n_embd),
            wpe = nn.Embedding(config.block_size, config.n_embd),
            h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
            ln_f = nn.LayerNorm(config.n_embd),
        ))
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # weight sharing scheme: tie the token embedding matrix to the LM head
        self.transformer.wte.weight = self.lm_head.weight

        # initialize parameters
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            std = 0.02
            if hasattr(module, 'NANOGPT_SCALE_INIT'):
                # scale residual projections down by 1/sqrt(2 * n_layer), as in GPT-2
                std *= (2 * self.config.n_layer) ** -0.5
            torch.nn.init.normal_(module.weight, mean=0.0, std=std)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
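

# Illustrative sketch (an assumption, not part of the original file): a minimal config
# stub exposing only the attributes GPT reads (vocab_size, block_size, n_layer, n_head,
# n_embd) is enough to confirm that the token embedding and the LM head share a single
# weight tensor after construction.
def _check_weight_tying():
    from types import SimpleNamespace
    cfg = SimpleNamespace(vocab_size=128, block_size=32, n_layer=2, n_head=2, n_embd=16)
    gpt = GPT(cfg)
    # both attributes point at the same Parameter object, so they can never diverge
    return gpt.transformer.wte.weight is gpt.lm_head.weight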


class GPTOptim(GPT2PreTrainedModel):
    config_class = GPTOptimConfig

    def __init__(self, config):
        super().__init__(config)
        self.model = GPT(config)
        self.config = config

    def forward(self, input_ids, labels=None):
        B, T = input_ids.size()
        assert T <= self.config.block_size, f"Cannot forward sequence of length {T}, block size is only {self.config.block_size}"

        # token and position embeddings
        pos = torch.arange(0, T, dtype=torch.long, device=input_ids.device)
        pos_emb = self.model.transformer.wpe(pos)
        tok_emb = self.model.transformer.wte(input_ids)
        x = tok_emb + pos_emb

        # run the stack of transformer blocks
        for block in self.model.transformer.h:
            x = block(x)

        # final layer norm and language-modeling head
        x = self.model.transformer.ln_f(x)
        logits = self.model.lm_head(x)
        loss = None
        if labels is not None:
            # cross-entropy over the vocabulary; labels are compared position-wise with
            # the logits (no shift is applied here), and positions equal to the EOS
            # token id are ignored
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1), ignore_index=self.config.eos_token_id)
        return logits, loss
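

# Usage sketch (added for illustration; the exact GPTOptimConfig constructor arguments
# are an assumption inferred from the attributes accessed above: vocab_size, block_size,
# n_layer, n_head, n_embd, eos_token_id):
#
#     config = GPTOptimConfig(
#         vocab_size=50257, block_size=1024, n_layer=12, n_head=12, n_embd=768,
#         eos_token_id=50256,
#     )
#     model = GPTOptim(config)
#     input_ids = torch.randint(0, config.vocab_size, (2, 16))
#     logits, loss = model(input_ids, labels=input_ids)
#     print(logits.shape)  # torch.Size([2, 16, config.vocab_size])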