kz919 committed
Commit ca763cc (1 parent: 3c56054)

Update configuration_sliding_llama.py

Files changed (1): configuration_sliding_llama.py (+7 -31)
configuration_sliding_llama.py CHANGED
@@ -21,7 +21,7 @@
 
 from transformers.configuration_utils import PretrainedConfig
 from transformers.utils import logging
-
+from transformers.modeling_rope_utils import rope_config_validation
 
 logger = logging.get_logger(__name__)
 
@@ -31,11 +31,8 @@ class LlamaConfig(PretrainedConfig):
     This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA
     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
     defaults will yield a similar configuration to that of the LLaMA-7B.
-
     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
     documentation from [`PretrainedConfig`] for more information.
-
-
     Args:
         vocab_size (`int`, *optional*, defaults to 32000):
             Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
@@ -97,16 +94,12 @@
             The dropout ratio for the attention probabilities.
         mlp_bias (`bool`, *optional*, defaults to `False`):
             Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
-
    ```python
    >>> from transformers import LlamaModel, LlamaConfig
-
    >>> # Initializing a LLaMA llama-7b style configuration
    >>> configuration = LlamaConfig()
-
    >>> # Initializing a model from the llama-7b style configuration
    >>> model = LlamaModel(configuration)
-
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
@@ -159,37 +152,20 @@
         self.use_cache = use_cache
         self.rope_theta = rope_theta
         self.rope_scaling = rope_scaling
-        self._rope_scaling_validation()
         self.attention_bias = attention_bias
         self.attention_dropout = attention_dropout
         self.mlp_bias = mlp_bias
         self.sliding_windows = sliding_windows if sliding_windows is not None else [0 for _ in range(num_hidden_layers)]
         assert len(self.sliding_windows) == self.num_hidden_layers
-
+
+        if self.rope_scaling is not None and "type" in self.rope_scaling:
+            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
+        rope_config_validation(self)
+
         super().__init__(
             pad_token_id=pad_token_id,
             bos_token_id=bos_token_id,
             eos_token_id=eos_token_id,
             tie_word_embeddings=tie_word_embeddings,
             **kwargs,
-        )
-
-    def _rope_scaling_validation(self):
-        """
-        Validate the `rope_scaling` configuration.
-        """
-        if self.rope_scaling is None:
-            return
-
-        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
-            raise ValueError(
-                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
-            )
-        rope_scaling_type = self.rope_scaling.get("type", None)
-        rope_scaling_factor = self.rope_scaling.get("factor", None)
-        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
-            raise ValueError(
-                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
-            )
-        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
-            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
+        )
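For reference, a minimal usage sketch of the validation path after this change: the constructor now copies a legacy `"type"` key in `rope_scaling` to `"rope_type"` and defers to `rope_config_validation` from `transformers.modeling_rope_utils` instead of the removed `_rope_scaling_validation` method. The local import path, the layer count, and the parameter values below are illustrative assumptions, not part of the commit.

```python
# Hypothetical usage sketch -- the import path and all values are assumptions.
from configuration_sliding_llama import LlamaConfig

config = LlamaConfig(
    num_hidden_layers=4,
    # One window size per layer; 0 matches the default fill value used when
    # sliding_windows is not passed. The length must equal num_hidden_layers.
    sliding_windows=[0, 4096, 0, 4096],
    # Legacy "type" key: __init__ now copies it to "rope_type" before calling
    # transformers' shared rope_config_validation helper.
    rope_scaling={"type": "linear", "factor": 2.0},
)

print(config.rope_scaling["rope_type"])  # linear
print(config.sliding_windows)            # [0, 4096, 0, 4096]
```

With this change, malformed entries (for example a `rope_scaling` dict missing the `factor` field for `linear` scaling) are flagged by the shared `rope_config_validation` helper rather than by the bespoke checks this commit removes.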