Unable to load
#21
by Ratar37003 - opened
!pip install git+https://github.com/huggingface/transformers.git@main accelerate bitsandbytes

from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import torch

model_id = "codellama/CodeLlama-7b-Instruct-hf"

# Quantize weights to 4-bit on load; do the matmuls in float16
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=quantization_config,
    device_map="auto",
)
OSError: codellama/CodeLlama-7b-Instruct-hf does not appear to have a file named config.json. Checkout 'https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf/None' for available files.
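A side note on the error itself: the None in the URL suggests the client never resolved a revision for the repo, rather than config.json actually being absent from the Hub. A quick diagnostic, assuming huggingface_hub is installed, is to list the repository's files directly:

from huggingface_hub import list_repo_files

# If config.json shows up here, the repo is fine and the failure is on the
# client side (typically an outdated transformers / huggingface_hub install).
print(list_repo_files("codellama/CodeLlama-7b-Instruct-hf"))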
Install the latest transformers release with pip install transformers, then load the model:
from transformers import AutoModelForCausalLM

MODEL_NAME = "codellama/CodeLlama-7b-Instruct-hf"

# load_in_8bit requires bitsandbytes; newer transformers releases prefer
# passing a BitsAndBytesConfig instead of this shortcut kwarg.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",
    trust_remote_code=True,
    load_in_8bit=True,
)
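Once that loads, here is a minimal generation sketch continuing from the snippet above (the prompt uses the [INST] ... [/INST] format that CodeLlama-Instruct expects; the sampling settings are just illustrative):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

prompt = "[INST] Write a Python function that reverses a string. [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# Sample up to 128 new tokens; tune max_new_tokens/temperature as needed
output = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.2)
print(tokenizer.decode(output[0], skip_special_tokens=True))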