|
"""Redact PII from free text with the NinjaMasker model.

Downloads King-Harry/NinjaMasker-PII-Redaction from the Hugging Face Hub
and runs one example prompt through a text-generation pipeline, printing
the model's output.
"""

from huggingface_hub import notebook_login
from transformers import AutoModelForCausalLM, AutoTokenizer, logging, pipeline

# Hub repo id of the fine-tuned PII-redaction model.
MODEL_NAME = "King-Harry/NinjaMasker-PII-Redaction"


def main() -> None:
    """Authenticate, load the model, and print the redacted example prompt."""
    # NOTE(review): notebook_login() only renders a login widget inside a
    # Jupyter notebook; for a plain script, `huggingface_hub.login()` or the
    # HF_TOKEN environment variable is the usual route — confirm how this
    # file is actually executed.
    notebook_login()

    # Suppress transformers' warning chatter so only the result is printed.
    logging.set_verbosity(logging.CRITICAL)

    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

    # Bug fix: the original used max_length=100, which counts the PROMPT
    # tokens toward the limit — with a long prompt the generated answer gets
    # truncated (or is empty). max_new_tokens bounds only the continuation.
    pipe = pipeline(
        task="text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=100,
    )

    prompt = "My name is Harry and I live in Winnipeg. My phone number is ummm 204 no 203, ahh 4344, no 4355"

    # Llama-2-style [INST] instruction wrapping, matching the model's
    # fine-tuning format.
    result = pipe(f"<s>[INST] {prompt} [/INST]")

    # generated_text includes the prompt followed by the model's redaction.
    print(result[0]["generated_text"])


if __name__ == "__main__":
    main()