File size: 1,143 Bytes
7156337
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43


import tiktoken
from tiktoken import Encoding

# Module-level Encoding instance for the gpt-4 model family.
tokenizer = tiktoken.encoding_for_model('gpt-4')
# Alias tiktoken's `n_vocab` under the HuggingFace-style name `vocab_size`
# (instance attribute only) so the `get_vocab` helper patched onto Encoding
# below can read `self.vocab_size`.  NOTE(review): this sets the attribute
# only on this one instance, not on the Encoding class — other Encoding
# instances will not have `vocab_size`; confirm that is intended.
tokenizer.vocab_size = tokenizer.n_vocab



def decode(self, tokens, errors="replace"):
    """Decode a list of token ids to a string, returning "null" on failure.

    Patched onto ``tiktoken.Encoding`` as a best-effort replacement for its
    ``decode`` method.

    Args:
        tokens: list of int token ids.
        errors: error-handling scheme passed to ``bytes.decode`` (default
            "replace", so undecodable bytes become U+FFFD).

    Returns:
        The decoded string, or the literal string "null" if decoding raised.
    """
    try:
        decode_str = self._core_bpe.decode_bytes(tokens).decode("utf-8", errors=errors)
    # Narrowed from a bare `except:` — a bare except also swallows
    # KeyboardInterrupt/SystemExit; Exception still covers invalid-token
    # errors from the core BPE while preserving the "null" fallback.
    except Exception:
        decode_str = "null"
    return decode_str

def convert_ids_to_tokens(self, tokens):
    """Map token ids to their raw byte representations.

    Patched onto ``tiktoken.Encoding``. Bug fix: the original called the
    module-global gpt-4 ``tokenizer`` instead of ``self``, so any other
    Encoding instance this method was patched onto would silently decode
    with the wrong vocabulary. Dispatching through ``self`` keeps the
    gpt-4 behavior identical while making other instances correct.

    Args:
        tokens: list of int token ids.

    Returns:
        list of ``bytes`` objects, one per input id.
    """
    return self.decode_tokens_bytes(tokens)

def get_vocab(self):
    """Returns vocab as a dict"""
    # Build {token_string: token_id} by decoding every id in the vocab.
    # Ids whose bytes are not valid UTF-8 (or that the encoding does not
    # know) are reported and skipped rather than aborting the whole scan.
    mapping = {}
    for token_id in range(self.vocab_size):
        try:
            raw = self.convert_ids_to_tokens([token_id])[0]
            mapping[raw.decode("utf-8")] = token_id
        except KeyError:
            print("gpt_35_turbo decode KeyError", token_id)
        except UnicodeDecodeError:
            print("gpt_35_turbo decode UnicodeDecodeError", token_id, str(raw))
    # vocab.update(self.added_tokens_encoder)
    return mapping


# Install the helpers above as methods on tiktoken's Encoding class so every
# Encoding instance gains the HuggingFace-style API.  Note this replaces
# Encoding's own `decode` with the best-effort variant defined above.
for _name, _fn in (
    ("decode", decode),
    ("convert_ids_to_tokens", convert_ids_to_tokens),
    ("get_vocab", get_vocab),
):
    setattr(Encoding, _name, _fn)