tokenspace/T5/dumptokens.py
#!/usr/bin/env python
"""
According to model,
vocab_size: 32128
But it actually caps out at 32099
"""
from transformers import T5Tokenizer

T = "mcmonkey/google_t5-v1_1-xxl_encoderonly"
tokenizer = T5Tokenizer.from_pretrained(T)
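# Optional sanity check (illustrative addition, not part of the original
# script): compare what the tokenizer reports against the model's advertised
# 32128. Exact numbers can vary with the transformers version, but
# len(tokenizer) is typically 32100, i.e. IDs 0..32099.
# print("tokenizer.vocab_size:", tokenizer.vocab_size)
# print("len(tokenizer):      ", len(tokenizer))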
# Token ID 3 decodes to the SentencePiece word-start marker ("▁"),
# used below to tell standalone words from sub-word pieces.
startword = tokenizer.convert_ids_to_tokens(3)
# print(startword)
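# For illustration (assumed behavior, not part of the original script):
# SentencePiece prefixes word-initial pieces with this marker, e.g.
#   tokenizer.tokenize("hello world")
# typically yields pieces like ['▁hello', '▁world'], while mid-word
# continuation pieces carry no marker.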
# Print the token string for the given id (the id should be an integer).
def print_token_from_id(token_id):
    decoded_token = tokenizer.convert_ids_to_tokens(token_id)
    print(decoded_token + " : " + str(token_id))
# Print the token only if it carries the word-start marker, i.e. it is a
# standalone word rather than just a sub-word building block.
def print_if_word(token_id):
    decoded_token = tokenizer.convert_ids_to_tokens(token_id)
    if decoded_token.startswith(startword):
        print(decoded_token[1:] + " : " + str(token_id))
# Standalone word, AND containing only plain 7-bit ASCII characters.
def print_if_asciiword(token_id):
    decoded_token = tokenizer.convert_ids_to_tokens(token_id)
    if decoded_token.startswith(startword):
        aword = decoded_token[1:]
        if len(aword) < 1:
            return
        # Skip anything with non-ASCII characters.
        if not aword.isascii():
            return
        print(aword + " : ", token_id)
# Walk the usable ID range and dump the tokens.
for token_id in range(4, 32099):
    # print_token_from_id(token_id)
    # print_if_word(token_id)
    print_if_asciiword(token_id)
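# Example usage (assumed invocation; the model above is downloaded on first
# run, which requires network access):
#
#   python dumptokens.py > t5_ascii_words.txt
#
# Output is one "word : id" line per standalone, ASCII-only token.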