""" Tokenization classes for KOSMOS-2 model.""" |
|
|
|
|
|
import os |
|
from shutil import copyfile |
|
from typing import List, Optional, Tuple |
|
|
|
from transformers.tokenization_utils import AddedToken |
|
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast |
|
from transformers.utils import is_sentencepiece_available, logging |
|
|
|
|
|
if is_sentencepiece_available(): |
|
from .tokenization_kosmos2 import Kosmos2Tokenizer |
|
else: |
|
Kosmos2TokenizerFast = None |
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} |
|
|
|
PRETRAINED_VOCAB_FILES_MAP = { |
|
"vocab_file": { |
|
"microsoft/kosmos-2-patch14-224": "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/sentencepiece.bpe.model", |
|
} |
|
} |
|
|
|
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { |
|
"microsoft/kosmos-2-patch14-224": 2048, |
|
} |
|
|
|
|
|
class Kosmos2TokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" KOSMOS-2 tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
    [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
    [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
            Additional special tokens used by the tokenizer.
        num_patch_index_tokens (`int`, *optional*, defaults to `1024`):
            The number of tokens used to specify the patch indices of bounding boxes in an image. These tokens have
            the format `<patch_index_xxxx>` where `xxxx` is an integer.
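
    Example (an illustrative sketch, not a doctest; assumes a KOSMOS-2 compatible `sentencepiece.bpe.model` in the
    working directory and that this class is importable from your environment):

    ```python
    >>> tokenizer = Kosmos2TokenizerFast(vocab_file="sentencepiece.bpe.model", add_tag_and_patch_index_tokens=True)
    >>> text = "<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object>"
    >>> token_ids = tokenizer(text).input_ids
    ```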
|
""" |
|
|
|
vocab_files_names = VOCAB_FILES_NAMES |
|
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP |
|
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES |
|
model_input_names = ["input_ids", "attention_mask"] |
|
slow_tokenizer_class = Kosmos2Tokenizer |
|
|
|
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        num_patch_index_tokens=1024,
        add_tag_and_patch_index_tokens=False,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file

        # End of document
        self.eod_token = "</doc>"
        # Beginning and end of image
        self.boi_token = "<image>"
        self.eoi_token = "</image>"
        # End of chunk and end of line
        self.eoc_token = "</chunk>"
        self.eol_token = "</line>"
        # Beginning and end of phrase
        self.bop_token = "<phrase>"
        self.eop_token = "</phrase>"
        # Beginning and end of object
        self.boo_token = "<object>"
        self.eoo_token = "</object>"
        # Delimiter between multiple objects grounded to the same phrase
        self.dom_token = "</delimiter_of_multi_objects/>"
        # Marker that enables the grounding task
        self.grd_token = "<grounding>"

        self.tag_tokens = [
            self.eod_token,
            self.boi_token,
            self.eoi_token,
            self.eoc_token,
            self.eol_token,
            self.bop_token,
            self.eop_token,
            self.boo_token,
            self.eoo_token,
            self.dom_token,
            self.grd_token,
        ]

        self.num_patch_index_tokens = num_patch_index_tokens
        # Patch-index tokens are zero-padded to 4 digits: `<patch_index_0000>` ... `<patch_index_1023>`
        patch_index_tokens = [f"<patch_index_{str(x).zfill(4)}>" for x in range(self.num_patch_index_tokens)]

        if add_tag_and_patch_index_tokens:
            for token in self.tag_tokens + patch_index_tokens:
                self.add_tokens(AddedToken(token, lstrip=True, rstrip=False), special_tokens=False)
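
        # Illustrative note on the KOSMOS-2 grounding format: a phrase grounded to a bounding box is followed by a
        # pair of patch-index tokens encoding the box's top-left and bottom-right patches, e.g.
        #   <phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object>
        # and multiple boxes for the same phrase are separated by `self.dom_token`.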
|
|
|
    @property
    def can_save_slow_tokenizer(self) -> bool:
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
        and adding special tokens. An XLM-RoBERTa sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
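
        Example (a sketch, not a doctest; assumes `cls_token_id == 0` and `sep_token_id == 2`, which is typical for
        this XLM-RoBERTa-style vocabulary):

        ```python
        >>> # tokenizer.build_inputs_with_special_tokens([5, 6])          -> [0, 5, 6, 2]
        >>> # tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])  -> [0, 5, 6, 2, 2, 7, 8, 2]
        ```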
|
""" |
|
|
|
if token_ids_1 is None: |
|
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] |
|
cls = [self.cls_token_id] |
|
sep = [self.sep_token_id] |
|
return cls + token_ids_0 + sep + sep + token_ids_1 + sep |
|
|
|
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa
        does not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
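
        Example (a sketch, not a doctest; the output length mirrors the layouts produced by
        `build_inputs_with_special_tokens`):

        ```python
        >>> # tokenizer.create_token_type_ids_from_sequences([5, 6])          -> [0, 0, 0, 0]
        >>> # tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8])  -> [0, 0, 0, 0, 0, 0, 0, 0]
        ```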
|
|
|
""" |
|
|
|
sep = [self.sep_token_id] |
|
cls = [self.cls_token_id] |
|
|
|
if token_ids_1 is None: |
|
return len(cls + token_ids_0 + sep) * [0] |
|
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] |
|
|
|
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Copy the original SentencePiece model file unless it already sits at the target location
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
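
# Illustrative usage of `save_vocabulary` (a sketch; assumes `sentencepiece.bpe.model` exists locally and that
# `./saved` is an existing directory):
#
#   tokenizer = Kosmos2TokenizerFast(vocab_file="sentencepiece.bpe.model")
#   tokenizer.save_vocabulary("./saved")  # copies sentencepiece.bpe.model into ./saved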
|
|