Error when running the demo code

#5 opened by ansat7

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from PIL import Image
import requests
from io import BytesIO

url = "https://huggingface.co/qresearch/llama-3-vision-alpha-hf/resolve/main/assets/demo-2.jpg"
response = requests.get(url)
image = Image.open(BytesIO(response.content))

model = AutoModelForCausalLM.from_pretrained(
    "qresearch/llama-3.1-8B-vision-378",
    trust_remote_code=True,
    torch_dtype=torch.float16,
).to("cuda")

tokenizer = AutoTokenizer.from_pretrained("qresearch/llama-3.1-8B-vision-378", use_fast=True)

print(
    model.answer_question(
        image, "Briefly describe the image", tokenizer, max_new_tokens=128, do_sample=True, temperature=0.3
    ),
)
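For reference, the failure below happens while the config is being parsed, before the model itself is instantiated, so the relevant environment detail is the installed transformers version rather than the GPU or the weights. A minimal sketch for recording it (nothing model-specific assumed):

import torch
import transformers

# Print the library versions in use; the config parsing step that fails below
# depends on the transformers version, not on the model weights themselves.
print("transformers:", transformers.__version__)
print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())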

ValueError Traceback (most recent call last)
in <cell line: 12>()
10
11
---> 12 model = AutoModelForCausalLM.from_pretrained(
13 "qresearch/llama-3.1-8B-vision-378",
14 trust_remote_code=True,

6 frames
/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
522 _ = kwargs.pop("quantization_config")
523
--> 524 config, kwargs = AutoConfig.from_pretrained(
525 pretrained_model_name_or_path,
526 return_unused_kwargs=True,

/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py in from_pretrained(cls, pretrained_model_name_or_path, **kwargs)
977 if os.path.isdir(pretrained_model_name_or_path):
978 config_class.register_for_auto_class()
--> 979 return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
980 elif "model_type" in config_dict:
981 try:

/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py in from_pretrained(cls, pretrained_model_name_or_path, cache_dir, force_download, local_files_only, token, revision, **kwargs)
608 )
609
--> 610 return cls.from_dict(config_dict, **kwargs)
611
612 @classmethod

/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py in from_dict(cls, config_dict, **kwargs)
770 config_dict["attn_implementation"] = kwargs.pop("attn_implementation", None)
771
--> 772 config = cls(**config_dict)
773
774 if hasattr(config, "pruned_heads"):

~/.cache/huggingface/modules/transformers_modules/qresearch/llama-3.1-8B-vision-378/fde56dfdab000d054da70d846dafe82560f71c77/configuration_llamavision.py in __init__(self, **kwargs)
6
7 def __init__(self, **kwargs):
----> 8 self.text_config = LlamaConfig(**kwargs.pop("text_config", {}))
9 self.vision_config = SiglipVisionConfig(**kwargs.pop("vision_config", {}))
10 super().__init__(**kwargs)

/usr/local/lib/python3.10/dist-packages/transformers/models/llama/configuration_llama.py in __init__(self, vocab_size, hidden_size, intermediate_size, num_hidden_layers, num_attention_heads, num_key_value_heads, hidden_act, max_position_embeddings, initializer_range, rms_norm_eps, use_cache, pad_token_id, bos_token_id, eos_token_id, pretraining_tp, tie_word_embeddings, rope_theta, rope_scaling, attention_bias, attention_dropout, mlp_bias, **kwargs)
159 self.rope_theta = rope_theta
160 self.rope_scaling = rope_scaling
--> 161 self._rope_scaling_validation()
162 self.attention_bias = attention_bias
163 self.attention_dropout = attention_dropout

/usr/local/lib/python3.10/dist-packages/transformers/models/llama/configuration_llama.py in _rope_scaling_validation(self)
180
181 if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
--> 182 raise ValueError(
183 "rope_scaling must be a dictionary with two fields, type and factor, " f"got {self.rope_scaling}"
184 )

ValueError: rope_scaling must be a dictionary with two fields, type and factor, got {'factor': 8.0, 'high_freq_factor': 4.0, 'low_freq_factor': 1.0, 'original_max_position_embeddings': 8192, 'rope_type': 'llama3'}
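In case it helps others hitting the same trace: this ValueError is typically raised when the installed transformers predates the Llama 3.1 config format, which extends rope_scaling with the llama3 rope_type and the extra frequency fields shown in the message; as far as I can tell, support for that format was added in transformers 4.43.0. A hedged check along those lines (assuming a standard pip-managed environment such as Colab):

from packaging import version
import transformers

# The llama3-style rope_scaling dict in this model's text_config is only
# accepted by transformers >= 4.43.0; older versions raise the ValueError above.
if version.parse(transformers.__version__) < version.parse("4.43.0"):
    print("Upgrade transformers, e.g. pip install -U 'transformers>=4.43.0', then restart the runtime and re-run the cell.")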
