chore(root): add initial files
Browse files- configuration_phi3_small.py +250 -0
- tokenization_phi3_small.py +313 -0
configuration_phi3_small.py
ADDED
@@ -0,0 +1,250 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
|
3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
from typing import Any, Dict, List, Optional, Union
|
17 |
+
|
18 |
+
from transformers.configuration_utils import PretrainedConfig
|
19 |
+
from transformers.utils import logging
|
20 |
+
|
21 |
+
from functools import cached_property
|
22 |
+
|
23 |
+
""" Phi3Small model configuration """
|
24 |
+
logger = logging.get_logger(__name__)
|
25 |
+
|
26 |
+
|
27 |
+
def next_mult(x, y):
    """Round ``x`` up to the nearest multiple of ``y``."""
    remainder = x % y
    if remainder == 0:
        return x
    return x + (y - remainder)
29 |
+
|
30 |
+
class Phi3SmallConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a `Phi3Small` model. It is used to
    instantiate a Phi-3-small model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the Phi-3-small
    [phi3](https://arxiv.org/pdf/2404.14219) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 100352):
            Vocabulary size of the Phi3Small model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling `Phi3Small`.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might safely be used with.
        rope_embedding_base (`float`, *optional*, defaults to 10^6):
            The base value for the RoPE (Relative Position Encoding) embedding.
        rope_position_scale (`float`, *optional*, defaults to 1.0):
            The scale factor for the RoPE position encoding.
        rope_scaling (`Optional[Dict[str, Union[float, List[float], int]]]`, *optional*, defaults to None):
            The scaling configuration used for LongRoPE.
        hidden_size (`int`, *optional*, defaults to 4096):
            The size of the hidden layers in the model.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            The number of layers in the model.
        num_attention_heads (`int`, *optional*, defaults to 32):
            The number of query heads in the model.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            The number of key-value heads in the model.
        hidden_act (`str`, *optional*, defaults to "gegelu"):
            The activation function used in the model.
        gegelu_limit (`float`, *optional*, defaults to 20.0):
            The limit value for the GELU activation function (for numerical stability).
        gegelu_pad_to_256 (`bool`, *optional*, defaults to True):
            Whether to pad the intermediate size to a multiple of 256 (for faster matmul ops).
        ff_dim_multiplier (`Optional[int]`, *optional*, defaults to None):
            The dimension multiplier for the feed-forward layers.
        ff_intermediate_size (`Optional[int]`, *optional*, defaults to 14336):
            The intermediate size for the feed-forward layers.
            One of `ff_dim_multiplier` or `ff_intermediate_size` must be specified.
        blocksparse_homo_head_pattern (`bool`, *optional*, defaults to False):
            Whether to use a homogeneous head pattern for block-sparse attention.
        blocksparse_block_size (`int`, *optional*, defaults to 64):
            The block size for block-sparse attention.
        blocksparse_num_local_blocks (`int`, *optional*, defaults to 16):
            The number of local blocks for block-sparse attention.
            The local window used in blocksparse equals `blocksparse_num_local_blocks * blocksparse_block_size`
        blocksparse_vert_stride (`int`, *optional*, defaults to 8):
            The vertical stride for block-sparse attention.
        blocksparse_triton_kernel_block_size (`int`, *optional*, defaults to 64):
            The kernel block size for block-sparse attention.
        dense_attention_every_n_layers (`Optional[int]`, *optional*, defaults to 2):
            The frequency of all dense attention layers in the model
        embedding_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for the embedding layer.
        attention_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for the attention layers.
        ffn_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for the feed-forward layers.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon value for layer normalization.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The range for weight initialization.
        mup_use_scaling (`bool`, *optional*, defaults to True):
            Whether to use scaling for MuP parameters (see: https://arxiv.org/abs/2203.03466).
        mup_width_multiplier (`float`, *optional*, defaults to 8.0):
            The width multiplier for MuP.
        mup_embedding_multiplier (`float`, *optional*, defaults to 10.0):
            The embedding multiplier for MuP.
        mup_attn_multiplier (`float`, *optional*, defaults to 1.0):
            The attention multiplier for MuP.
        use_cache (`bool`, *optional*, defaults to True):
            Whether to use cache for the model.
        bos_token_id (`int`, *optional*, defaults to 100257):
            The token ID for the beginning of sentence.
        eos_token_id (`int`, *optional*, defaults to 100257):
            The token ID for the end of sentence.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to False):
            Whether to reorder and upcast attention.
        pad_sequence_to_multiple_of_64 (`bool`, *optional*, defaults to True):
            Whether to pad the sequence length to a multiple of 64.
        **kwargs:
            Additional keyword arguments.

    Example:

    ```python
    >>> from transformers import Phi3SmallConfig, Phi3SmallModel

    >>> # Initializing a Phi3Small configuration
    >>> configuration = Phi3SmallConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = Phi3SmallModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    model_type = "phi3small"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        # General information about the model
        vocab_size: int = 100352,
        max_position_embeddings: int = 8192,
        # RoPE Related Parameters
        rope_embedding_base: float = 10**6,
        rope_position_scale: float = 1.0,
        rope_scaling: Optional[Dict[str, Union[float, List[float], int]]] = None,
        # General Model Parameters
        hidden_size: int = 4096,
        num_hidden_layers: int = 32,
        # KV Shared Attention Configurations
        num_attention_heads: int = 32,
        num_key_value_heads: int = 8,
        # GEGELU Related Parameters
        hidden_act: str = "gegelu",
        gegelu_limit: float = 20.0,
        gegelu_pad_to_256: bool = True,
        ff_dim_multiplier: Optional[int] = None,
        ff_intermediate_size: Optional[int] = 14336,
        # Block Sparse Attention Parameters
        blocksparse_homo_head_pattern: bool = False,
        blocksparse_block_size: int = 64,
        blocksparse_num_local_blocks: int = 16,
        blocksparse_vert_stride: int = 8,
        blocksparse_triton_kernel_block_size: int = 64,
        # Frequency of block-sparsity
        dense_attention_every_n_layers: Optional[int] = 2,
        # Regularization parameters
        embedding_dropout_prob: float = 0.1,
        attention_dropout_prob: float = 0.0,
        ffn_dropout_prob: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        # MuP parameters (floats; the original `bool` annotations were incorrect)
        mup_use_scaling: bool = True,
        mup_width_multiplier: float = 8.0,
        mup_embedding_multiplier: float = 10.0,
        mup_attn_multiplier: float = 1.0,
        use_cache: bool = True,
        # The model does not have a bos token id
        # However, in order for some of the downstream libraries to not break
        # we set this to be the same as the eos_token_id
        bos_token_id: int = 100257,
        eos_token_id: int = 100257,
        reorder_and_upcast_attn: bool = False,
        # Configuration to pad sequence length to a multiple of 64
        pad_sequence_to_multiple_of_64: bool = True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.rope_embedding_base = rope_embedding_base
        self.rope_position_scale = rope_position_scale
        self.rope_scaling = rope_scaling
        self.hidden_size = hidden_size
        # QK Shared Attention
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        # Block Sparse Attention Pattern
        self.blocksparse_homo_head_pattern = blocksparse_homo_head_pattern
        self.blocksparse_block_size = blocksparse_block_size
        self.blocksparse_num_local_blocks = blocksparse_num_local_blocks
        self.blocksparse_vert_stride = blocksparse_vert_stride
        self.blocksparse_triton_kernel_block_size = blocksparse_triton_kernel_block_size
        # Frequency of block sparsity
        self.dense_attention_every_n_layers = dense_attention_every_n_layers
        # Activation function
        self.hidden_act = hidden_act
        self.gegelu_limit = gegelu_limit
        self.gegelu_pad_to_256 = gegelu_pad_to_256
        self.ff_dim_multiplier = ff_dim_multiplier
        self.ff_intermediate_size = ff_intermediate_size
        # Exactly one of the two feed-forward sizing knobs must be provided.
        # NOTE: the original messages interpolated the *values* (e.g.
        # "Cannot have both None and None as None"); they now name the parameters.
        if self.ff_dim_multiplier is None and self.ff_intermediate_size is None:
            raise ValueError("One of `ff_dim_multiplier` or `ff_intermediate_size` must be specified; both are None")
        if self.ff_dim_multiplier is not None and self.ff_intermediate_size is not None:
            raise ValueError("Cannot specify both `ff_dim_multiplier` and `ff_intermediate_size`.")
        # General regularization
        self.embedding_dropout_prob = embedding_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.ffn_dropout_prob = ffn_dropout_prob
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        # MuP parameters
        self.mup_use_scaling = mup_use_scaling
        self.mup_width_multiplier = mup_width_multiplier
        self.mup_embedding_multiplier = mup_embedding_multiplier
        self.mup_attn_multiplier = mup_attn_multiplier
        self.use_cache = use_cache

        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.pad_sequence_to_multiple_of_64 = pad_sequence_to_multiple_of_64

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @cached_property
    def dummy_token_indices(self) -> List[int]:
        """Token ids that carry no meaning for the model (padding/unused specials)."""
        # Importing here to avoid circular imports
        from .tokenization_phi3_small import Phi3SmallTokenizer
        tokenizer = Phi3SmallTokenizer()
        return tokenizer.dummy_token_indices

    @property
    def intermediate_size(self) -> int:
        """Feed-forward intermediate size: explicit if given, otherwise derived
        from `ff_dim_multiplier` and optionally padded up to a multiple of 256."""
        if self.ff_intermediate_size is not None:
            return self.ff_intermediate_size
        intermediate_size = (self.ff_dim_multiplier) * (self.hidden_size // 3) * 2
        if self.gegelu_pad_to_256:
            intermediate_size = next_mult(intermediate_size, 256)
        return intermediate_size
tokenization_phi3_small.py
ADDED
@@ -0,0 +1,313 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Adapted from https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/tokenization_qwen.py
|
2 |
+
import os
|
3 |
+
from typing import Collection, List, Optional, Dict, Set, Tuple, Union
|
4 |
+
|
5 |
+
from functools import cached_property
|
6 |
+
|
7 |
+
import base64
|
8 |
+
|
9 |
+
from transformers import PreTrainedTokenizer, AddedToken, AutoConfig
|
10 |
+
from transformers.models.auto.tokenization_auto import get_tokenizer_config
|
11 |
+
import tiktoken
|
12 |
+
|
13 |
+
|
14 |
+
"""
|
15 |
+
This tokenizer is almost identical to tiktoken.get_encoding("cl100k_base")
|
16 |
+
with a few additional special tokens to support the ChatML format.
|
17 |
+
|
18 |
+
TODO(bapatra): Right now, I do not save the special tokens to the vocab file.
|
19 |
+
Maybe in the future, that would be useful? Can add that support later.
|
20 |
+
|
21 |
+
"""
|
22 |
+
|
23 |
+
def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
|
24 |
+
with open(tiktoken_bpe_file, "rb") as f:
|
25 |
+
contents = f.read()
|
26 |
+
return {
|
27 |
+
base64.b64decode(token): int(rank)
|
28 |
+
for token, rank in (line.split() for line in contents.splitlines() if line)
|
29 |
+
}
|
30 |
+
|
31 |
+
# On the Megatron codebase, vocabularies are padded so that the vocab matrix
# multiplication is fast. The padding leaves some indices with no real token;
# we account for those empty indices by adding dummy tokens to the tokenizer.

EFFECTIVE_PADDED_VOCAB_SIZE = 100352
ACTUAL_VOCAB_SIZE = 100276


# Padding tokens occupy ids ACTUAL_VOCAB_SIZE + 1 .. EFFECTIVE_PADDED_VOCAB_SIZE - 1
# and are named <|dummy_id_12|> onward (names up to dummy_id_11 are used below).
DUMMY_TOKENS = {}
for _offset in range(1, EFFECTIVE_PADDED_VOCAB_SIZE - ACTUAL_VOCAB_SIZE):
    DUMMY_TOKENS[f"<|dummy_id_{11 + _offset}|>"] = ACTUAL_VOCAB_SIZE + _offset
del _offset

SPECIAL_TOKENS = {
    # Inherited from tiktoken.get_encoding("cl100k_base")._special_tokens
    "<|endoftext|>": 100257,
    "<|fim_prefix|>": 100258,
    "<|fim_middle|>": 100259,
    "<|fim_suffix|>": 100260,
    # Special tokens for post-training
    "<|system|>": 100261,
    "<|user|>": 100262,
    "<|assistant|>": 100263,
    # Dummy unused tokens
    "<|dummy_id_0|>": 100264,
    "<|dummy_id_1|>": 100265,
    # Special tokens for post-training continued
    "<|end|>": 100266,
    # Some dummy tokens, so that tokenization is contiguous and does not cause issues.
    # Note that the 100256th token of tiktoken.get_encoding("cl100k_base") does not
    # actually map to anything, so a dummy token sits there.
    "<|dummy_id_2|>": 100256,
    # Likewise, tokens from 100267 to 100275 are also unused
    "<|dummy_id_3|>": 100267,
    "<|dummy_id_4|>": 100268,
    "<|dummy_id_5|>": 100269,
    "<|dummy_id_6|>": 100270,
    "<|dummy_id_7|>": 100271,
    "<|dummy_id_8|>": 100272,
    "<|dummy_id_9|>": 100273,
    "<|dummy_id_10|>": 100274,
    "<|dummy_id_11|>": 100275,
    # The final end-of-prompt token: unused, but present as a part of
    # tiktoken.get_encoding("cl100k_base")._special_tokens
    "<|endofprompt|>": 100276,
    # Dummy tokens that pad the vocabulary so tensor cores are used for the
    # vocab projection
    **DUMMY_TOKENS,
}
|
80 |
+
|
81 |
+
class Phi3SmallTokenizer(PreTrainedTokenizer):
    """Tokenizer for Phi-3-small.

    Wraps tiktoken's ``cl100k_base`` encoding, extended with ChatML-style
    special tokens (``<|system|>``, ``<|user|>``, ``<|assistant|>``, ``<|end|>``)
    and dummy tokens that pad the vocabulary to a matmul-friendly size.
    """

    vocab_files_names = {
        "vocab_file": "cl100k_base.tiktoken"
    }

    model_input_names: List[str] = ["input_ids", "attention_mask"]
    padding_side = "left"

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        errors: str = "replace",
        **kwargs
    ) -> None:
        """
        Args:
            vocab_file: Optional path to a tiktoken BPE vocabulary file. When
                None, the ranks bundled with tiktoken's ``cl100k_base`` are used.
            errors: Error-handling scheme passed to ``bytes.decode`` when
                converting token bytes back to text.
        """
        # PreTrainedTokenizer's init calls _add_tokens, which in turn checks
        # if the token is present in `self.special_tokens`. Hence this must be
        # assigned *before* super().__init__() runs.
        # The way Qwen gets around this is by checking against SPECIAL_TOKENS,
        # but checking against the object's own `special_tokens` keeps the door
        # open for per-instance special tokens later.
        self.special_tokens = SPECIAL_TOKENS

        super().__init__(**kwargs)
        self.errors = errors

        base = tiktoken.get_encoding("cl100k_base")
        if vocab_file is None:
            self.mergeable_ranks: Dict[bytes, int] = base._mergeable_ranks
        else:
            self.mergeable_ranks = _load_tiktoken_bpe(vocab_file)

        self.pat_str = base._pat_str

        enc = tiktoken.Encoding(
            name="phi3small",
            pat_str=self.pat_str,
            mergeable_ranks=self.mergeable_ranks,
            special_tokens=self.special_tokens,
        )
        self.tokenizer = enc

        # id -> token map: bytes for regular BPE tokens, str for special tokens
        self.decoder: Dict[int, bytes] = {
            v: k for k, v in self.mergeable_ranks.items()
        }
        self.decoder.update({v: k for k, v in self.special_tokens.items()})

        self.eod_id = self.tokenizer.eot_token
        self._eos_token = self._convert_id_to_token(self.eod_id)

        # Setting the bos_token to be the same as the eos_token
        # Note that this is **not** the correct thing to do, and is done
        # just so that some of the downstream libraries do not break.
        self._bos_token = self._eos_token

        # Assign the special tokens to instance attributes for convenient access
        self.system_id = self.special_tokens["<|system|>"]
        self.user_id = self.special_tokens["<|user|>"]
        self.assistant_id = self.special_tokens["<|assistant|>"]
        self.end_id = self.special_tokens["<|end|>"]

    @cached_property
    def dummy_token_indices(self) -> List[int]:
        """Sorted ids of every token that carries no meaning for this model."""
        # There are some additional special tokens in the cl100k_base tokenizer
        # that we do not use. Hence, we also consider them to be dummy tokens.
        additional_tokens = [
            "<|fim_prefix|>",
            "<|fim_middle|>",
            "<|fim_suffix|>",
            "<|endofprompt|>"
        ]
        dummy_token_indices = [index for token, index in self.special_tokens.items() if "dummy_id" in token]
        dummy_token_indices.extend([self.special_tokens[token] for token in additional_tokens])
        return sorted(dummy_token_indices)

    def __getstate__(self):
        # tiktoken.Encoding objects are not picklable; drop the encoder here
        # and rebuild it in __setstate__.
        state = self.__dict__.copy()
        del state["tokenizer"]
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        # Rebuild the tiktoken Encoding removed by __getstate__.
        enc = tiktoken.Encoding(
            name="cl100k_im",
            pat_str=self.pat_str,
            mergeable_ranks=self.mergeable_ranks,
            special_tokens=self.special_tokens,
        )
        self.tokenizer = enc

    def __len__(self):
        return self.tokenizer.n_vocab

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Union[str, os.PathLike],
        *init_inputs,
        **kwargs,
    ):
        """Instantiate the tokenizer, preferring any saved tokenizer config;
        otherwise fall back to the model config for ``model_max_length``."""
        cls_kwargs = kwargs
        # First try to load from the tokenization config if it exists
        tokenization_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
        if tokenization_config:
            cls_kwargs = {
                **tokenization_config,
                **cls_kwargs
            }
        else:
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=True)
            cls_kwargs["model_max_length"] = config.max_position_embeddings
        return cls(**cls_kwargs)

    def get_vocab(self) -> Dict[Union[str, bytes], int]:
        """Full token -> id mapping (BPE ranks plus special tokens)."""
        return {**self.mergeable_ranks, **self.special_tokens}

    def convert_tokens_to_ids(
        self,
        tokens: Union[bytes, str, List[Union[bytes, str]]]
    ) -> Union[int, List[int]]:
        """Convert a single token or a list of tokens to id(s).

        NOTE(review): an unknown single token returns None (``dict.get``
        default) rather than raising — confirm callers rely on that.
        """
        # (Removed a dead `ids = []` assignment that preceded the isinstance check.)
        if isinstance(tokens, (str, bytes)):
            if tokens in self.special_tokens:
                return self.special_tokens[tokens]
            else:
                return self.mergeable_ranks.get(tokens)
        ids: List[int] = []
        for token in tokens:
            ids.append(self.convert_tokens_to_ids(token))
        return ids

    def _add_tokens(
        self,
        new_tokens: Union[List[str], List[AddedToken]],
        special_tokens: bool = False,
    ) -> int:
        """Reject any token that is not already a known special token.

        Always returns 0 since no token is ever actually added.
        """
        if not special_tokens and new_tokens:
            raise ValueError("Only special tokens can be added to this tokenizer")
        for token in new_tokens:
            surface_form = token.content if isinstance(token, AddedToken) else token
            if surface_form not in self.special_tokens:
                raise ValueError(
                    "For now, we do not support unknown special tokens\n"
                    "In the future, if there is a need for this, we can add special tokens to the tokenizer\n"
                    "starting from rank 100261 - 100263 and then 100266 - 100275.\n"
                    "And finally, we can re-construct the enc object back\n"
                )
        return 0

    def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
        """Write the BPE ranks as base64-token + rank lines.

        Special tokens are not saved (see module-level TODO).
        """
        file_path = os.path.join(save_directory, "cl100k_base.tiktoken")
        with open(file_path, "w") as f:
            for token, rank in self.mergeable_ranks.items():
                line = base64.b64encode(token).decode("utf-8") + " " + str(rank) + "\n"
                f.write(line)
        return (file_path,)

    def tokenize(
        self,
        text: str,
        allowed_special: Union[Set, str] = "all",
        disallowed_special: Union[Collection, str] = (),
        **kwargs
    ) -> List[Union[bytes, str]]:
        """Encode *text* and map each id back to its surface token
        (bytes for BPE tokens, str for special tokens)."""
        tokens: List[Union[bytes, str]] = []
        for token_id in self.tokenizer.encode(
            text, allowed_special=allowed_special, disallowed_special=disallowed_special
        ):
            tokens.append(self.decoder[token_id])
        return tokens

    def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
        """
        Converts a sequence of tokens in a single string.
        """
        text = ""
        temp = b""
        for t in tokens:
            if isinstance(t, str):
                # Flush any accumulated raw bytes before appending the str token.
                if temp:
                    text += temp.decode("utf-8", errors=self.errors)
                    temp = b""
                text += t
            elif isinstance(t, bytes):
                temp += t
            else:
                # Fixed message typo: "type types" -> "type bytes".
                raise TypeError("token should only be of type bytes or str")
        if temp:
            text += temp.decode("utf-8", errors=self.errors)
        return text

    @property
    def vocab_size(self):
        return self.tokenizer.n_vocab

    @property
    def eos_token_id(self) -> int:
        return self.eod_id

    def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
        """Converts an id to a token, special tokens included"""
        if index in self.decoder:
            return self.decoder[index]
        raise ValueError("unknown ids")

    def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
        """Converts a token to an id using the vocab, special tokens included"""
        if token in self.special_tokens:
            return self.special_tokens[token]
        if token in self.mergeable_ranks:
            return self.mergeable_ranks[token]
        raise ValueError("unknown token")

    def _tokenize(self, text: str, **kwargs):
        """
        Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based
        vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
        Do NOT take care of added tokens.
        """
        raise NotImplementedError

    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        errors: Optional[str] = None,
        **kwargs,
    ) -> str:
        """Decode ids back to text; optionally drop ids >= eod_id.

        NOTE(review): the skip filter `i < self.eod_id` also keeps the special
        id 100256 (<|dummy_id_2|>) — confirm that is intended.
        """
        if isinstance(token_ids, int):
            token_ids = [token_ids]
        if skip_special_tokens:
            token_ids = [i for i in token_ids if i < self.eod_id]
        return self.tokenizer.decode(token_ids, errors=errors or self.errors)
312 |
+
|
313 |
+
|