from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, logging
from huggingface_hub import notebook_login

# Authenticate with the Hugging Face Hub (stores the token used by from_pretrained)
notebook_login()

# Ignore warnings
logging.set_verbosity(logging.CRITICAL)

# Load the model and tokenizer (the token from notebook_login is used if the repo is gated)
model_name = "King-Harry/NinjaMasker-PII-Redaction"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Build a text-generation pipeline and run the redaction prompt
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=100)
prompt = "My name is Harry and I live in Winnipeg. My phone number is ummm 204 no 203, ahh 4344, no 4355"
result = pipe(f"[INST] {prompt} [/INST]")

# Print the generated text
print(result[0]['generated_text'])
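
# Note (assumption, not part of the original snippet): by default a
# text-generation pipeline echoes the prompt back inside 'generated_text'.
# A minimal sketch for keeping only the model's redacted continuation,
# assuming the [INST] ... [/INST] wrapper used above:
redacted = result[0]['generated_text'].split('[/INST]')[-1].strip()
print(redacted)

# Alternatively, the pipeline's return_full_text option drops the prompt
# from the output directly:
# result = pipe(f"[INST] {prompt} [/INST]", return_full_text=False)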