"""

vocab size: 106029

中文汉字数:54230, 中文标点数: 549

moss很奇怪,
"""

import json
from transformers import AutoTokenizer


# tokenizer = AutoTokenizer.from_pretrained("tokenizer", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("moss-moon-003-sft", trust_remote_code=True)

print("vocab size:", tokenizer.vocab_size)

# tokens = tokenizer.encode("中国\nabcde<eoc>")
tokens = tokenizer.encode("中<eoc>")
decode_line = tokenizer.decode(tokens)
print("decoded:", decode_line)
for token in tokens:
    print(token, tokenizer.decode([token]))


def test1():
    # Encode a single Chinese character, inspect the raw BPE token pieces
    # (byte-level unicode-mapped characters) and their code points, then
    # convert the pieces back to a string.
    word = "中"
    token_ids = tokenizer.encode(word)
    tokens = tokenizer.convert_ids_to_tokens(token_ids)
    print(tokens)
    print([ord(k) for k in tokens[0]])
    decode_str = tokenizer.convert_tokens_to_string(tokens)
    print(decode_str)

def test_token():
    # For each character, print the per-id decode result and the raw token piece.
    for word in "中国解决方法黑白侗,。!?;":
        encoding = tokenizer.encode(word)
        for token_id in encoding:
            decode_str = tokenizer.decode([token_id])  # special characters all decode to "�", i.e. "\ufffd"
            token = tokenizer.convert_ids_to_tokens([token_id])
            print(word, token_id, decode_str, json.dumps(decode_str), token, json.dumps(token))
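

# A minimal sketch (not in the original script) illustrating the "\ufffd" issue seen
# in test_token(): a rare Chinese character may be split across several byte-level
# BPE pieces, and only the complete id sequence decodes back to valid UTF-8.
def test_roundtrip():
    word = "侗"
    token_ids = tokenizer.encode(word)
    pieces = [tokenizer.decode([tid]) for tid in token_ids]  # per-id decodes, may contain "\ufffd"
    whole = tokenizer.decode(token_ids)                       # decoding all ids together recovers the text
    print(pieces, "->", whole)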



# test_token()
test1()