damerajee committed on
Commit
2d60e7b
1 Parent(s): 8563c72

Create tokenizer_Llamoe.py

Files changed (1)
  1. tokenizer_Llamoe.py +289 -0
tokenizer_Llamoe.py ADDED
@@ -0,0 +1,289 @@
import os
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from transformers.utils import logging
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer


if TYPE_CHECKING:
    pass

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}

SPIECE_UNDERLINE = "▁"


class GemmoeTokenizer(PreTrainedTokenizer):
    """
    Construct a Gemmoe tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset, as there
    is no padding token in the original model.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<bos>"`):
            The beginning-of-sequence token that was used during pretraining. Can be used as a sequence classifier token.
        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<eos>"`):
            The end-of-sequence token.
        pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<pad>"`):
            A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
            attention mechanisms or loss computation.
        sp_model_kwargs (`Dict[str, Any]`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
            to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
                using the forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether or not to add a `bos_token` at the start of sequences.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
            extra spaces.
        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
            Whether or not the default system prompt for Gemmoe should be used.
        spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to add spaces between special tokens.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<bos>",
        eos_token="<eos>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        clean_up_tokenization_spaces=False,
        use_default_system_prompt=False,
        spaces_between_special_tokens=False,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token

        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.use_default_system_prompt = use_default_system_prompt

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            sp_model_kwargs=self.sp_model_kwargs,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            use_default_system_prompt=use_default_system_prompt,
            spaces_between_special_tokens=spaces_between_special_tokens,
            **kwargs,
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        """Returns vocab size"""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text, **kwargs):
        """
        Returns a tokenized string. The Gemmoe tokenizer never adds a prefix space.
        """
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        spaces_between_special_tokens: bool = False,
        **kwargs,
    ) -> str:
        sub_texts = []
        current_sub_text = []
        for ids in token_ids:
            if skip_special_tokens and ids in self.all_special_ids:
                continue
            if ids in self._added_tokens_decoder:
                if current_sub_text:
                    sub_texts.append(self.sp_model.decode(current_sub_text))
                sub_texts.append(self._added_tokens_decoder[ids].content)
                current_sub_text = []
            else:
                current_sub_text.append(ids)
        if current_sub_text:
            sub_texts.append(self.sp_model.decode(current_sub_text))

        if spaces_between_special_tokens:
            sub_texts = " ".join(sub_texts)
        else:
            sub_texts = "".join(sub_texts)

        return sub_texts

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
            if token in self._added_tokens_encoder:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        bos_token_id = [1] if self.add_bos_token else []
        eos_token_id = [1] if self.add_eos_token else []

        if token_ids_1 is None:
            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
        return (
            bos_token_id
            + ([0] * len(token_ids_0))
            + eos_token_id
            + bos_token_id
            + ([0] * len(token_ids_1))
            + eos_token_id
        )

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed, to be used in a sequence-pair classification task. An ALBERT
        sequence pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is None, only the first portion of the mask (0s) is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of ids.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)

        if token_ids_1 is not None:
            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)

        return output

    def _build_conversation_input_ids(self, conversation: List[List[int]]) -> List[int]:
        """Builds chat-formatted input ids from a list of already-tokenized turns, alternating user and model."""
        input_ids = []
        for i, history in enumerate(conversation):
            if i % 2 == 0:
                # user turn
                input_ids.extend(
                    [self.bos_token_id, self.convert_tokens_to_ids("<start_of_turn>")]
                    + history
                    + [self.convert_tokens_to_ids("<end_of_turn>")]
                )
            else:
                # model turn
                input_ids.extend(
                    [self.bos_token_id, self.convert_tokens_to_ids("<start_of_turn>"), self.convert_tokens_to_ids("model")]
                    + history
                    + [self.convert_tokens_to_ids("<end_of_turn>\n")]
                )
        input_ids.append(self.eos_token_id)
        return input_ids
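
A rough usage sketch of the class added in this commit, assuming a compatible SentencePiece `tokenizer.model` file sits next to the script (the path and sample text below are placeholders, not part of the commit):

# Minimal sketch: load the tokenizer from a local SentencePiece model and round-trip a string.
from tokenizer_Llamoe import GemmoeTokenizer

tokenizer = GemmoeTokenizer("tokenizer.model")  # placeholder path to a SentencePiece model

# Encode a prompt; with add_bos_token=True (the default) a <bos> id is prepended.
ids = tokenizer("Hello, world!")["input_ids"]
print(ids)

# Decode back to text, dropping special tokens such as <bos>.
print(tokenizer.decode(ids, skip_special_tokens=True))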