Update configuration_Llamoe.py

configuration_Llamoe.py  CHANGED  (+54 -63)
@@ -1,72 +1,63 @@
-
-from
+import math
+from typing import Optional
 
+from transformers import PretrainedConfig
 
-logger = logging.get_logger(__name__)
 
-
-"
-}
+class PhiConfig(PretrainedConfig):
+    """Phi configuration."""
 
-
-
-
-
-
-
+    model_type = "llama"
+    attribute_map = {
+        "max_position_embeddings": "n_positions",
+        "hidden_size": "n_embd",
+        "num_attention_heads": "n_head",
+        "num_hidden_layers": "n_layer",
+    }
 
     def __init__(
         self,
-        vocab_size=
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.num_attention_heads = num_attention_heads
-        self.head_dim = head_dim
-        self.hidden_act = hidden_act
-        self.hidden_activation = hidden_activation
-        self.num_key_value_heads = num_key_value_heads
-        self.initializer_range = initializer_range
-        self.rms_norm_eps = rms_norm_eps
-        self.use_cache = use_cache
-        self.rope_theta = rope_theta
-        self.attention_bias = attention_bias
-        self.attention_dropout = attention_dropout
+        vocab_size: int = 50304,
+        n_positions: int = 2048,
+        n_embd: int = 1024,
+        n_layer: int = 20,
+        n_inner: Optional[int] = None,
+        n_head: int = 16,
+        n_head_kv: Optional[int] = None,
+        num_experts_per_tok: int = 2,
+        num_local_experts: int = 4,
+        rotary_dim: Optional[int] = 32,
+        activation_function: Optional[str] = "gelu_new",
+        flash_attn: bool = False,
+        flash_rotary: bool = False,
+        fused_dense: bool = False,
+        attn_pdrop: float = 0.0,
+        embd_pdrop: float = 0.0,
+        resid_pdrop: float = 0.0,
+        layer_norm_epsilon: float = 1e-5,
+        initializer_range: float = 0.02,
+        tie_word_embeddings: bool = False,
+        pad_vocab_size_multiple: int = 64,
+        **kwargs
+    ) -> None:
+        self.vocab_size = int(math.ceil(vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)
+        self.n_positions = n_positions
+        self.n_embd = n_embd
+        self.n_layer = n_layer
+        self.n_inner = n_inner
+        self.n_head = n_head
+        self.n_head_kv = n_head_kv
         self.num_experts_per_tok = num_experts_per_tok
         self.num_local_experts = num_local_experts
-        self.
-        self.
-
-
-
-
-
-
-
-
+        self.rotary_dim = min(rotary_dim, n_embd // n_head)
+        self.activation_function = activation_function
+        self.flash_attn = flash_attn
+        self.flash_rotary = flash_rotary
+        self.fused_dense = fused_dense
+        self.attn_pdrop = attn_pdrop
+        self.embd_pdrop = embd_pdrop
+        self.resid_pdrop = resid_pdrop
+        self.layer_norm_epsilon = layer_norm_epsilon
+        self.initializer_range = initializer_range
+
+        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
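
For reference, a minimal usage sketch of the updated config, assuming the file is importable as configuration_Llamoe (this repo's module, not a transformers API) and that transformers is installed. It exercises the three behaviors the new __init__ introduces: vocab padding, the attribute_map aliases, and the rotary_dim clamp.

# Minimal usage sketch; the import path is an assumption based on this
# repo's file name, not part of the transformers library.
from configuration_Llamoe import PhiConfig

config = PhiConfig(vocab_size=50295)

# vocab_size is rounded up to the next multiple of pad_vocab_size_multiple (64):
# ceil(50295 / 64) * 64 = 786 * 64 = 50304
assert config.vocab_size == 50304

# attribute_map aliases the HF-canonical names onto the Phi-style fields:
assert config.hidden_size == config.n_embd == 1024
assert config.num_attention_heads == config.n_head == 16
assert config.num_hidden_layers == config.n_layer == 20

# rotary_dim is clamped to the per-head width (n_embd // n_head = 64),
# so the default of 32 is kept. Note that passing rotary_dim=None would
# raise here despite the Optional[int] annotation, since min(None, 64)
# is invalid.
assert config.rotary_dim == 32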