ppbrown commited on
Commit
fb65c78
1 Parent(s): d28c9f9

Upload T5/dumptokens.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. T5/dumptokens.py +53 -0
T5/dumptokens.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/env python
2
+
3
+ """
4
+ According to model,
5
+ vocab_size: 32128
6
+
7
+ But it actually caps out at 32099
8
+ """
9
+
10
+
11
+ from transformers import T5Tokenizer,T5EncoderModel
12
+ import torch
13
+ import charade
14
+
15
# Hub id of the encoder-only T5 checkpoint whose vocabulary we dump.
T="mcmonkey/google_t5-v1_1-xxl_encoderonly"

# Loads the tokenizer from the HF hub (network/disk I/O on first run).
tokenizer = T5Tokenizer.from_pretrained(T)

# Token id 3 is used below as the SentencePiece word-start marker piece;
# tokens that begin with it are standalone words rather than subword
# building blocks.
# NOTE(review): assumes id 3 decodes to the single-char "▁" metasymbol for
# this vocab — confirm (the [1:] slices below depend on it being one char).
startword= tokenizer.convert_ids_to_tokens(3)
#print (startword)
21
+
22
# id should be a numeral
def print_token_from_id(id):
    """Print the token string for *id*, then " : " and the id itself."""
    token = tokenizer.convert_ids_to_tokens(id)
    print("%s : %s" % (token, str(id)))
26
+
27
# print if it has the marker indicating it is a standalone word,
# not just a building block
def print_if_word(id):
    """Print "<word> : <id>" for token *id*, but only when the token
    starts with the word-start marker; the marker char is stripped
    from the printed text. Prints nothing otherwise."""
    token = tokenizer.convert_ids_to_tokens(id)
    if not token.startswith(startword):
        return
    print(token[1:] + " : " + str(id))
33
+
34
# standalone word, AND doesn't have any foreign non-ascii7 chars
def print_if_asciiword(id):
    """Print "<word> : <id>" for token *id*, but only when the token is a
    standalone word (carries the word-start marker) whose text is pure
    7-bit ASCII. Prints nothing otherwise.
    """
    decoded_tokens = tokenizer.convert_ids_to_tokens(id)
    if not decoded_tokens.startswith(startword):
        return
    aword = decoded_tokens[1:]  # strip the single-char word-start marker
    if not aword:
        return
    # Reject anything outside 7-bit ASCII. The original test looked for a
    # "\x" escape inside repr() of the UTF-8 bytes, which is fragile: it
    # depends on bytes.__repr__ details and raises BytesWarning under
    # `python -b`. Encoding as ASCII tests the property directly.
    try:
        aword.encode('ascii')
    except UnicodeEncodeError:
        return
    # Concatenate like the other print_* helpers so the output format is
    # consistent ("word : id" — the old comma form printed a double space).
    print(aword + " : " + str(id))
46
+
47
# Walk the usable vocabulary: ids 0-3 are specials/marker, and (per the
# module docstring) ids above the low-32000s are unused padding slots.
for token_id in range(4, 32099):
    #print_token_from_id(token_id)
    #print_if_word(token_id)
    print_if_asciiword(token_id)
51
+
52
+
53
+