""" XLM_ROBERTa_XL configuration""" |
|
|
|
from collections import OrderedDict |
|
from typing import Mapping |
|
|
|
from transformers.configuration_utils import PretrainedConfig |
|
from transformers.onnx import OnnxConfig |
|
from transformers.utils import logging |
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
|
|
|
|
|
|
class XLMRobertaXLConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`XLMRobertaXLModel`] or a [`TFXLMRobertaXLModel`].
    It is used to instantiate an XLM_ROBERTA_XL model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    XLM_ROBERTA_XL [facebook/xlm-roberta-xl](https://huggingface.co/facebook/xlm-roberta-xl) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 250880):
            Vocabulary size of the XLM_ROBERTA_XL model. Defines the number of different tokens that can be
            represented by the `input_ids` passed when calling [`XLMRobertaXLModel`].
        hidden_size (`int`, *optional*, defaults to 2560):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 36):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 10240):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 514):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 1):
            The vocabulary size of the `token_type_ids` passed when calling [`XLMRobertaXLModel`] or
            [`TFXLMRobertaXLModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.

    Examples:

    ```python
    >>> from transformers import XLMRobertaXLConfig, XLMRobertaXLModel

    >>> # Initializing an XLM_ROBERTA_XL facebook/xlm-roberta-xl style configuration
    >>> configuration = XLMRobertaXLConfig()

    >>> # Initializing a model (with random weights) from the facebook/xlm-roberta-xl style configuration
    >>> model = XLMRobertaXLModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
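
    >>> # Instantiating a non-default configuration (the sizes below are illustrative only,
    >>> # they do not correspond to a released checkpoint)
    >>> custom_configuration = XLMRobertaXLConfig(
    ...     hidden_size=1024,
    ...     num_hidden_layers=12,
    ...     num_attention_heads=16,
    ...     intermediate_size=4096,
    ...     position_embedding_type="relative_key",
    ... )
    >>> custom_model = XLMRobertaXLModel(custom_configuration)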
|
    ```"""

model_type = "xlm-roberta-xl" |
|
|
|
def __init__( |
|
self, |
|
vocab_size=250880, |
|
hidden_size=2560, |
|
num_hidden_layers=36, |
|
num_attention_heads=32, |
|
intermediate_size=10240, |
|
hidden_act="gelu", |
|
hidden_dropout_prob=0.1, |
|
attention_probs_dropout_prob=0.1, |
|
max_position_embeddings=514, |
|
type_vocab_size=1, |
|
initializer_range=0.02, |
|
layer_norm_eps=1e-05, |
|
pad_token_id=1, |
|
bos_token_id=0, |
|
eos_token_id=2, |
|
position_embedding_type="absolute", |
|
use_cache=True, |
|
classifier_dropout=None, |
|
**kwargs, |
|
): |
|
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) |
|
self.vocab_size = vocab_size |
|
self.hidden_size = hidden_size |
|
self.num_hidden_layers = num_hidden_layers |
|
self.num_attention_heads = num_attention_heads |
|
self.hidden_act = hidden_act |
|
self.intermediate_size = intermediate_size |
|
self.hidden_dropout_prob = hidden_dropout_prob |
|
self.attention_probs_dropout_prob = attention_probs_dropout_prob |
|
self.max_position_embeddings = max_position_embeddings |
|
self.type_vocab_size = type_vocab_size |
|
self.initializer_range = initializer_range |
|
self.layer_norm_eps = layer_norm_eps |
|
self.position_embedding_type = position_embedding_type |
|
self.use_cache = use_cache |
|
self.classifier_dropout = classifier_dropout |
|
|
|
|
|
|
|
class XLMRobertaXLOnnxConfig(OnnxConfig):
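    """
    ONNX export configuration for XLM-RoBERTa-XL. It declares the graph inputs (`input_ids`, `attention_mask`)
    together with their dynamic axes (batch, sequence, and a choice axis for multiple-choice tasks), so that
    exported models accept variable batch sizes and sequence lengths.

    A minimal export sketch (assumes the optional ONNX export dependencies, e.g. the `onnx` package, are
    installed; the output path below is illustrative):

    ```python
    >>> from pathlib import Path
    >>> from transformers import AutoTokenizer, XLMRobertaXLModel
    >>> from transformers.onnx import export

    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/xlm-roberta-xl")
    >>> model = XLMRobertaXLModel.from_pretrained("facebook/xlm-roberta-xl")
    >>> onnx_config = XLMRobertaXLOnnxConfig(model.config)
    >>> onnx_inputs, onnx_outputs = export(
    ...     tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("xlm-roberta-xl.onnx")
    ... )
    ```
    """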
|
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
|
|
|
|