How to use: To use this model, you can load it directly from the Hugging Face Model Hub:
I copied the code exactly as displayed and ran it (the snippet omits a considerable number of required library imports, which I added), but encountered an error:
c:\Python311\Lib\site-packages\huggingface_hub\file_download.py:1150: FutureWarning: resume_download
is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use force_download=True
.
warnings.warn(
Traceback (most recent call last):
File "c:\Users\living\Desktop\test2.py", line 8, in <module>
model = AutoModelForCausalLM.from_pretrained("MiaoshouAI/Florence-2-base-PromptGen", trust_remote_code=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Python311\Lib\site-packages\transformers\models\auto\auto_factory.py", line 444, in from_pretrained
config, kwargs = AutoConfig.from_pretrained(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Python311\Lib\site-packages\transformers\models\auto\configuration_auto.py", line 938, in from_pretrained
return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Python311\Lib\site-packages\transformers\configuration_utils.py", line 554, in from_pretrained
return cls.from_dict(config_dict, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Python311\Lib\site-packages\transformers\configuration_utils.py", line 725, in from_dict
logger.info(f"Model config {config}")
^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Python311\Lib\site-packages\transformers\configuration_utils.py", line 757, in __repr__
return f"{self.__class__.__name__} {self.to_json_string()}"
^^^^^^^^^^^^^^^^^^^^^
File "c:\Python311\Lib\site-packages\transformers\configuration_utils.py", line 836, in to_json_string
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Python311\Lib\json\__init__.py", line 238, in dumps
**kw).encode(obj)
^^^^^^^^^^^
File "c:\Python311\Lib\json\encoder.py", line 202, in encode
chunks = list(chunks)
^^^^^^^^^^^^
File "c:\Python311\Lib\json\encoder.py", line 432, in _iterencode
yield from _iterencode_dict(o, _current_indent_level)
File "c:\Python311\Lib\json\encoder.py", line 406, in _iterencode_dict
yield from chunks
File "c:\Python311\Lib\json\encoder.py", line 439, in _iterencode
o = _default(o)
^^^^^^^^^^^
File "c:\Python311\Lib\json\encoder.py", line 180, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type Florence2LanguageConfig is not JSON serializable
The next is my code(modified):
import torch
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

# Run Florence-2-base-PromptGen on a sample image and print the parsed caption.
#
# NOTE(review): the reported
#   "TypeError: Object of type Florence2LanguageConfig is not JSON serializable"
# is raised while transformers merely tries to LOG the config — it is a known
# incompatibility between Florence-2's custom remote code and an outdated
# transformers install. Upgrade first: `pip install -U transformers`.

device = "cuda" if torch.cuda.is_available() else "cpu"

# trust_remote_code=True is required: Florence-2 ships its own modeling code.
# BUG FIX: the original never moved the model to `device`, while `inputs`
# below IS moved there — on a CUDA machine generate() then fails with a
# device-mismatch error. `.to(device)` keeps model and inputs together.
model = AutoModelForCausalLM.from_pretrained(
    "MiaoshouAI/Florence-2-base-PromptGen", trust_remote_code=True
).to(device)
model.eval()  # inference only — disable dropout etc.
processor = AutoProcessor.from_pretrained(
    "MiaoshouAI/Florence-2-base-PromptGen", trust_remote_code=True
)

# BUG FIX: Florence-2 requires a task-token prompt; an empty string is not a
# valid task and breaks post_process_generation(task=""). The original post's
# "<...>" tag was almost certainly stripped by markdown. PromptGen supports
# e.g. <GENERATE_TAGS>, <CAPTION>, <DETAILED_CAPTION>, <MORE_DETAILED_CAPTION>.
prompt = "<MORE_DETAILED_CAPTION>"

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"
image = Image.open(requests.get(url, stream=True).raw)

# Tensors must live on the same device as the model (see fix above).
inputs = processor(text=prompt, images=image, return_tensors="pt").to(device)

generated_ids = model.generate(
    input_ids=inputs["input_ids"],
    pixel_values=inputs["pixel_values"],
    max_new_tokens=1024,
    do_sample=False,   # deterministic decoding
    num_beams=3,
)

# Keep special tokens: post_process_generation needs them to parse the task output.
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
parsed_answer = processor.post_process_generation(
    generated_text, task=prompt, image_size=(image.width, image.height)
)
print(parsed_answer)
Fix it pls.