# Necessary imports
import sys
from typing import Any

import torch
from transformers import AutoModel, AutoTokenizer

# Local imports
from src.logger import logging
from src.exception import CustomExceptionHandling


def load_model_and_tokenizer(model_name: str, device: str) -> Any:
    """
    Load the model and tokenizer.

    Args:
        - model_name (str): The name of the model to load.
        - device (str): The device to load the model onto.

    Returns:
        - model: The loaded model.
        - tokenizer: The loaded tokenizer.
    """
    try:
        # Load the model with remote code enabled, SDPA attention, and bfloat16 weights
        model = AutoModel.from_pretrained(
            model_name,
            trust_remote_code=True,
            attn_implementation="sdpa",
            torch_dtype=torch.bfloat16,
        )
        model = model.to(device=device)

        # Load the matching tokenizer and put the model in evaluation mode
        tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        model.eval()

        # Log the successful loading of the model and tokenizer
        logging.info("Model and tokenizer loaded successfully.")

        # Return the model and tokenizer
        return model, tokenizer

    # Handle exceptions that may occur during model and tokenizer loading
    except Exception as e:
        # Custom exception handling
        raise CustomExceptionHandling(e, sys) from e
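

# A minimal usage sketch, not part of the original module: the checkpoint name below
# is a hypothetical placeholder; substitute any Hugging Face model id whose remote
# code loads with AutoModel, SDPA attention, and bfloat16 weights.
if __name__ == "__main__":
    # Pick a device at runtime and load the placeholder checkpoint
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, tokenizer = load_model_and_tokenizer("your-org/your-model", device)
    logging.info(f"Loaded {model.__class__.__name__} on {device}")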