# gemma-2b-var/generate.py
from transformers import AutoModelForCausalLM, AutoTokenizer
# Paths to the merged model and its tokenizer (a mergekit output directory)
model_directory = "/workspace/mergekit/output-model-directory"
tokenizer_directory = "/workspace/mergekit/output-model-directory"

# Load the tokenizer (ignore_mismatched_sizes applies to model weights,
# not tokenizers, so it is dropped here)
tokenizer = AutoTokenizer.from_pretrained(tokenizer_directory)
# Load the model; ignore_mismatched_sizes tolerates weight-shape mismatches
# that can occur when loading a merged checkpoint
model = AutoModelForCausalLM.from_pretrained(model_directory, ignore_mismatched_sizes=True)
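
# Alternative load (a minimal sketch, not part of the original script): on a
# machine with a GPU, loading in half precision with device_map="auto" roughly
# halves memory use. This assumes the `accelerate` package is installed;
# uncomment to use it in place of the from_pretrained call above.
# import torch
# model = AutoModelForCausalLM.from_pretrained(
#     model_directory,
#     torch_dtype=torch.float16,  # store weights in fp16 to reduce memory
#     device_map="auto",          # let accelerate place weights across devices
# )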
input_text = "Example input text here"
inputs = tokenizer(input_text, return_tensors="pt")

# Generate with greedy decoding; passing **inputs forwards the attention mask
# along with the input ids (max_length counts prompt tokens plus new ones)
outputs = model.generate(**inputs, max_length=50)
# Decode generated ids to text
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(generated_text)
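
# Optional sampling variant (a minimal sketch, not in the original script):
# greedy decoding above can be repetitive; sampling gives more varied output.
# The temperature and top_p values here are illustrative assumptions.
sample_inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
sampled = model.generate(
    **sample_inputs,
    max_new_tokens=50,  # cap on newly generated tokens, independent of prompt length
    do_sample=True,     # sample from the distribution instead of taking the argmax
    temperature=0.7,    # values below 1.0 sharpen the next-token distribution
    top_p=0.9,          # nucleus sampling: keep the smallest token set with 90% mass
)
print(tokenizer.decode(sampled[0], skip_special_tokens=True))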