Commit • 49330f50
Parent(s):
Duplicate from merve/blip2-flan-t5-xxl
Browse files
- .gitattributes +34 -0
- README.md +160 -0
- config.json +254 -0
- handler.py +40 -0
- preprocessor_config.json +24 -0
- pytorch_model-00001-of-00006.bin +3 -0
- pytorch_model-00002-of-00006.bin +3 -0
- pytorch_model-00003-of-00006.bin +3 -0
- pytorch_model-00004-of-00006.bin +3 -0
- pytorch_model-00005-of-00006.bin +3 -0
- pytorch_model-00006-of-00006.bin +3 -0
- pytorch_model.bin.index.json +0 -0
- requirements.txt +9 -0
- special_tokens_map.json +107 -0
- spiece.model +3 -0
- tokenizer.json +0 -0
- tokenizer_config.json +113 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,160 @@
---
language: en
license: mit
tags:
- vision
- image-to-text
- image-captioning
- visual-question-answering
pipeline_tag: image-to-text
inference: false
---

# BLIP-2, Flan T5-xxl, pre-trained only

BLIP-2 model, leveraging [Flan T5-xxl](https://huggingface.co/google/flan-t5-xxl) (a large language model).
It was introduced in the paper [BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](https://arxiv.org/abs/2301.12597) by Li et al. and first released in [this repository](https://github.com/salesforce/LAVIS/tree/main/projects/blip2).

Disclaimer: The team releasing BLIP-2 did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

BLIP-2 consists of 3 models: a CLIP-like image encoder, a Querying Transformer (Q-Former) and a large language model.

The authors initialize the weights of the image encoder and large language model from pre-trained checkpoints and keep them frozen
while training the Querying Transformer, which is a BERT-like Transformer encoder that maps a set of "query tokens" to query embeddings,
which bridge the gap between the embedding space of the image encoder and the large language model.

The goal of the model is simply to predict the next text token, given the query embeddings and the previous text.

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/blip2_architecture.jpg" alt="drawing" width="600"/>

This allows the model to be used for tasks like:

- image captioning
- visual question answering (VQA)
- chat-like conversations by feeding the image and the previous conversation as a prompt to the model

## Direct Use and Downstream Use

You can use the raw model for conditional text generation given an image and optional text. See the [model hub](https://huggingface.co/models?search=Salesforce/blip) to look for
fine-tuned versions on a task that interests you.

## Bias, Risks, Limitations, and Ethical Considerations

BLIP2-FlanT5 uses off-the-shelf Flan-T5 as the language model. It inherits the same risks and limitations from [Flan-T5](https://arxiv.org/pdf/2210.11416.pdf):

> Language models, including Flan-T5, can potentially be used for language generation in a harmful way, according to Rae et al. (2021). Flan-T5 should not be used directly in any application, without a prior assessment of safety and fairness concerns specific to the application.

BLIP2 is fine-tuned on image-text datasets (e.g. [LAION](https://laion.ai/blog/laion-400-open-dataset/)) collected from the internet. As a result, the model itself is potentially vulnerable to generating equivalently inappropriate content or replicating inherent biases in the underlying data.

BLIP2 has not been tested in real-world applications. It should not be directly deployed in any applications. Researchers should first carefully assess the safety and fairness of the model in relation to the specific context it is being deployed within.

### How to use

For code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/main/en/model_doc/blip-2#transformers.Blip2ForConditionalGeneration.forward.example), or refer to the snippets below depending on your use case:

#### Running the model on CPU

<details>
<summary> Click to expand </summary>

```python
import requests
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xxl")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xxl")

img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

question = "how many dogs are in the picture?"
inputs = processor(raw_image, question, return_tensors="pt")

out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```
</details>

#### Running the model on GPU

##### In full precision

<details>
<summary> Click to expand </summary>

```python
# pip install accelerate
import requests
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xxl")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xxl", device_map="auto")

img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

question = "how many dogs are in the picture?"
inputs = processor(raw_image, question, return_tensors="pt").to("cuda")

out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```
</details>

##### In half precision (`float16`)

<details>
<summary> Click to expand </summary>

```python
# pip install accelerate
import torch
import requests
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xxl")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xxl", torch_dtype=torch.float16, device_map="auto")

img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

question = "how many dogs are in the picture?"
inputs = processor(raw_image, question, return_tensors="pt").to("cuda", torch.float16)

out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```
</details>

##### In 8-bit precision (`int8`)

<details>
<summary> Click to expand </summary>

```python
# pip install accelerate bitsandbytes
import torch
import requests
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xxl")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xxl", load_in_8bit=True, device_map="auto")

img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

question = "how many dogs are in the picture?"
inputs = processor(raw_image, question, return_tensors="pt").to("cuda", torch.float16)

out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```
</details>
config.json
ADDED
@@ -0,0 +1,254 @@
{
  "_commit_hash": null,
  "architectures": [
    "Blip2ForConditionalGeneration"
  ],
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "model_type": "blip-2",
  "num_query_tokens": 32,
  "qformer_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_probs_dropout_prob": 0.1,
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "classifier_dropout": null,
    "cross_attention_frequency": 2,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "early_stopping": false,
    "encoder_hidden_size": 1408,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.1,
    "hidden_size": 768,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_range": 0.02,
    "intermediate_size": 3072,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-12,
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 512,
    "min_length": 0,
    "model_type": "blip_2_qformer",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 12,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_hidden_layers": 12,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 0,
    "position_embedding_type": "absolute",
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.27.0.dev0",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "vocab_size": 30522
  },
  "text_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": [
      "T5ForConditionalGeneration"
    ],
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": 1,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "d_ff": 10240,
    "d_kv": 64,
    "d_model": 4096,
    "decoder_start_token_id": 0,
    "dense_act_fn": "gelu",
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout_rate": 0.1,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 1,
    "exponential_decay_length_penalty": null,
    "feed_forward_proj": "gated-gelu",
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "initializer_factor": 1.0,
    "is_decoder": false,
    "is_encoder_decoder": true,
    "is_gated_act": true,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_epsilon": 1e-06,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "t5",
    "no_repeat_ngram_size": 0,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_decoder_layers": 24,
    "num_heads": 64,
    "num_layers": 24,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_past": true,
    "output_scores": false,
    "pad_token_id": 0,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "relative_attention_max_distance": 128,
    "relative_attention_num_buckets": 32,
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": false,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": "float32",
    "torchscript": false,
    "transformers_version": "4.27.0.dev0",
    "typical_p": 1.0,
    "use_bfloat16": false,
    "use_cache": true,
    "vocab_size": 32128
  },
  "torch_dtype": "float32",
  "transformers_version": null,
  "use_decoder_only_language_model": false,
  "vision_config": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "begin_suppress_tokens": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.0,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "exponential_decay_length_penalty": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "gelu",
    "hidden_size": 1408,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "image_size": 224,
    "initializer_factor": 1.0,
    "initializer_range": 1e-10,
    "intermediate_size": 6144,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-6,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "blip_2_vision_model",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 16,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_channels": 3,
    "num_hidden_layers": 39,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 14,
    "prefix": null,
    "problem_type": null,
    "projection_dim": 512,
    "pruned_heads": {},
    "qkv_bias": true,
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "suppress_tokens": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tf_legacy_loss": false,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.27.0.dev0",
    "typical_p": 1.0,
    "use_bfloat16": false
  }
}
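The config.json above nests three sub-configurations (vision encoder, Q-Former, and the Flan-T5 XXL text model) inside one composite config. A minimal sketch, assuming Hub access and reusing the repo id from the README, of inspecting those fields with transformers:

```python
# Minimal sketch: load the composite BLIP-2 config and check a few of the
# values listed above. Assumes network access to the Hugging Face Hub.
from transformers import Blip2Config

config = Blip2Config.from_pretrained("Salesforce/blip2-flan-t5-xxl")

print(config.num_query_tokens)                  # 32
print(config.vision_config.hidden_size)         # 1408, matches qformer encoder_hidden_size
print(config.qformer_config.num_hidden_layers)  # 12
print(config.text_config.d_model)               # 4096 (Flan-T5 XXL)
```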
handler.py
ADDED
@@ -0,0 +1,40 @@
from typing import Dict, List, Any
from transformers import AutoProcessor, Blip2ForConditionalGeneration
import base64
from io import BytesIO
from PIL import Image
import string
import torch


class EndpointHandler:
    def __init__(self, path=""):
        # load model and processor from path
        self.processor = AutoProcessor.from_pretrained(path)
        self.model = Blip2ForConditionalGeneration.from_pretrained(path, device_map="auto", load_in_4bit=True)

    def __call__(self, data):
        """
        Args:
            inputs:
                Dict of image and text inputs.
        """
        # process input: decode the base64-encoded image and preprocess it together with the text prompt
        inputs = data.pop("inputs", data)
        image = Image.open(BytesIO(base64.b64decode(inputs['image'])))
        inputs = self.processor(images=image, text=inputs["text"], return_tensors="pt").to("cuda", torch.float16)
        # beam-search generation with a mild repetition penalty and a short maximum length
        generated_ids = self.model.generate(
            **inputs,
            temperature=1.0,
            length_penalty=1.0,
            repetition_penalty=1.5,
            max_length=30,
            min_length=1,
            num_beams=5,
            top_p=0.9,
        )
        result = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
        # make sure the returned text ends with punctuation
        if result and result[-1] not in string.punctuation:
            result += "."

        return [{"generated_text": result}]
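handler.py is the custom entry point for Inference Endpoints: it loads the model in 4-bit, expects a base64-encoded image plus a text prompt, and returns a single generated string. A minimal sketch of invoking the handler locally, assuming a CUDA GPU, a local copy of this repository in `./blip2-flan-t5-xxl`, and a sample image `demo.jpg` (all illustrative assumptions):

```python
# Minimal local driver for the EndpointHandler above (illustrative assumptions:
# handler.py importable, a CUDA GPU, a local checkpoint dir, a sample image).
import base64

from handler import EndpointHandler

handler = EndpointHandler(path="./blip2-flan-t5-xxl")

with open("demo.jpg", "rb") as f:
    payload = {
        "inputs": {
            "image": base64.b64encode(f.read()).decode("utf-8"),  # base64 string, as the handler expects
            "text": "how many dogs are in the picture?",
        }
    }

print(handler(payload))  # e.g. [{"generated_text": "..."}]
```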
preprocessor_config.json
ADDED
@@ -0,0 +1,24 @@
{
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.48145466,
    0.4578275,
    0.40821073
  ],
  "image_processor_type": "BlipImageProcessor",
  "image_std": [
    0.26862954,
    0.26130258,
    0.27577711
  ],
  "processor_class": "Blip2Processor",
  "resample": 3,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "height": 224,
    "width": 224
  }
}
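preprocessor_config.json describes the image pipeline applied by `Blip2Processor`: convert to RGB, resize to 224x224 with bicubic resampling (`resample: 3`), rescale by 1/255, then normalize with CLIP's mean and standard deviation. A rough NumPy equivalent, shown only to make the fields concrete (the sample file name is an assumption; in practice the processor does this for you):

```python
# Rough NumPy equivalent of the preprocessing settings above (illustrative only).
import numpy as np
from PIL import Image

image = Image.open("demo.jpg").convert("RGB")      # do_convert_rgb
image = image.resize((224, 224), Image.BICUBIC)    # do_resize, size 224x224, resample=3

pixels = np.asarray(image).astype(np.float32)
pixels = pixels * 0.00392156862745098              # do_rescale (1/255)

mean = np.array([0.48145466, 0.4578275, 0.40821073], dtype=np.float32)
std = np.array([0.26862954, 0.26130258, 0.27577711], dtype=np.float32)
pixels = (pixels - mean) / std                     # do_normalize

print(pixels.shape)  # (224, 224, 3); the processor also reorders to (3, 224, 224)
```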
pytorch_model-00001-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f4a89a2be7e4dfb9df01d625de507e05ef7d98802373ecc2b9eef275e3a0e21
size 9366645070
pytorch_model-00002-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c6c798d563cef908391f0dd851e79bf48dd0e3ab1c201ae263adfe5c2b570669
size 9865471479
pytorch_model-00003-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2d531b198e00ec4921d780013e8e8e9d5a33d8f176eb1f0c1870a0990d9ecc46
size 9989230293
pytorch_model-00004-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8e895a309e3feebd4002e85463a2c9e3f5e9f8b222eded51e9697ae300784475
size 9999745855
pytorch_model-00005-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bf66dfbcca32106c3abdece9b6ebfd437388b77d22efdc397e4263b0bfd2c590
size 9697753745
pytorch_model-00006-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:573fc31aa93a65a8f44678aee87fea5d39613b44bcc80886c0b01d25da59855f
size 526386090
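The six LFS pointers above record the shard sizes in bytes; summing them gives the on-disk footprint of the float32 checkpoint. A small convenience sketch:

```python
# Sum of the shard sizes recorded in the LFS pointers above.
shard_sizes = [
    9366645070, 9865471479, 9989230293,
    9999745855, 9697753745, 526386090,
]
total = sum(shard_sizes)
print(total)                       # 49445232532 bytes
print(round(total / 1024**3, 1))   # ~46.0 GiB (~49.4 GB) on disk
```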
pytorch_model.bin.index.json
ADDED
The diff for this file is too large to render.
See raw diff
requirements.txt
ADDED
@@ -0,0 +1,9 @@
accelerate==0.23.0
bitsandbytes==0.41.1
gradio==3.44.4
huggingface-hub==0.17.2
Pillow==10.0.1
scipy==1.11.2
torch==2.0.0
torchvision==0.15.1
transformers==4.33.2
special_tokens_map.json
ADDED
@@ -0,0 +1,107 @@
{
  "additional_special_tokens": [
    "<extra_id_0>",
    "<extra_id_1>",
    "<extra_id_2>",
    "<extra_id_3>",
    "<extra_id_4>",
    "<extra_id_5>",
    "<extra_id_6>",
    "<extra_id_7>",
    "<extra_id_8>",
    "<extra_id_9>",
    "<extra_id_10>",
    "<extra_id_11>",
    "<extra_id_12>",
    "<extra_id_13>",
    "<extra_id_14>",
    "<extra_id_15>",
    "<extra_id_16>",
    "<extra_id_17>",
    "<extra_id_18>",
    "<extra_id_19>",
    "<extra_id_20>",
    "<extra_id_21>",
    "<extra_id_22>",
    "<extra_id_23>",
    "<extra_id_24>",
    "<extra_id_25>",
    "<extra_id_26>",
    "<extra_id_27>",
    "<extra_id_28>",
    "<extra_id_29>",
    "<extra_id_30>",
    "<extra_id_31>",
    "<extra_id_32>",
    "<extra_id_33>",
    "<extra_id_34>",
    "<extra_id_35>",
    "<extra_id_36>",
    "<extra_id_37>",
    "<extra_id_38>",
    "<extra_id_39>",
    "<extra_id_40>",
    "<extra_id_41>",
    "<extra_id_42>",
    "<extra_id_43>",
    "<extra_id_44>",
    "<extra_id_45>",
    "<extra_id_46>",
    "<extra_id_47>",
    "<extra_id_48>",
    "<extra_id_49>",
    "<extra_id_50>",
    "<extra_id_51>",
    "<extra_id_52>",
    "<extra_id_53>",
    "<extra_id_54>",
    "<extra_id_55>",
    "<extra_id_56>",
    "<extra_id_57>",
    "<extra_id_58>",
    "<extra_id_59>",
    "<extra_id_60>",
    "<extra_id_61>",
    "<extra_id_62>",
    "<extra_id_63>",
    "<extra_id_64>",
    "<extra_id_65>",
    "<extra_id_66>",
    "<extra_id_67>",
    "<extra_id_68>",
    "<extra_id_69>",
    "<extra_id_70>",
    "<extra_id_71>",
    "<extra_id_72>",
    "<extra_id_73>",
    "<extra_id_74>",
    "<extra_id_75>",
    "<extra_id_76>",
    "<extra_id_77>",
    "<extra_id_78>",
    "<extra_id_79>",
    "<extra_id_80>",
    "<extra_id_81>",
    "<extra_id_82>",
    "<extra_id_83>",
    "<extra_id_84>",
    "<extra_id_85>",
    "<extra_id_86>",
    "<extra_id_87>",
    "<extra_id_88>",
    "<extra_id_89>",
    "<extra_id_90>",
    "<extra_id_91>",
    "<extra_id_92>",
    "<extra_id_93>",
    "<extra_id_94>",
    "<extra_id_95>",
    "<extra_id_96>",
    "<extra_id_97>",
    "<extra_id_98>",
    "<extra_id_99>"
  ],
  "eos_token": "</s>",
  "pad_token": "<pad>",
  "unk_token": "<unk>"
}
spiece.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
size 791656
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,113 @@
{
  "additional_special_tokens": [
    "<extra_id_0>",
    "<extra_id_1>",
    "<extra_id_2>",
    "<extra_id_3>",
    "<extra_id_4>",
    "<extra_id_5>",
    "<extra_id_6>",
    "<extra_id_7>",
    "<extra_id_8>",
    "<extra_id_9>",
    "<extra_id_10>",
    "<extra_id_11>",
    "<extra_id_12>",
    "<extra_id_13>",
    "<extra_id_14>",
    "<extra_id_15>",
    "<extra_id_16>",
    "<extra_id_17>",
    "<extra_id_18>",
    "<extra_id_19>",
    "<extra_id_20>",
    "<extra_id_21>",
    "<extra_id_22>",
    "<extra_id_23>",
    "<extra_id_24>",
    "<extra_id_25>",
    "<extra_id_26>",
    "<extra_id_27>",
    "<extra_id_28>",
    "<extra_id_29>",
    "<extra_id_30>",
    "<extra_id_31>",
    "<extra_id_32>",
    "<extra_id_33>",
    "<extra_id_34>",
    "<extra_id_35>",
    "<extra_id_36>",
    "<extra_id_37>",
    "<extra_id_38>",
    "<extra_id_39>",
    "<extra_id_40>",
    "<extra_id_41>",
    "<extra_id_42>",
    "<extra_id_43>",
    "<extra_id_44>",
    "<extra_id_45>",
    "<extra_id_46>",
    "<extra_id_47>",
    "<extra_id_48>",
    "<extra_id_49>",
    "<extra_id_50>",
    "<extra_id_51>",
    "<extra_id_52>",
    "<extra_id_53>",
    "<extra_id_54>",
    "<extra_id_55>",
    "<extra_id_56>",
    "<extra_id_57>",
    "<extra_id_58>",
    "<extra_id_59>",
    "<extra_id_60>",
    "<extra_id_61>",
    "<extra_id_62>",
    "<extra_id_63>",
    "<extra_id_64>",
    "<extra_id_65>",
    "<extra_id_66>",
    "<extra_id_67>",
    "<extra_id_68>",
    "<extra_id_69>",
    "<extra_id_70>",
    "<extra_id_71>",
    "<extra_id_72>",
    "<extra_id_73>",
    "<extra_id_74>",
    "<extra_id_75>",
    "<extra_id_76>",
    "<extra_id_77>",
    "<extra_id_78>",
    "<extra_id_79>",
    "<extra_id_80>",
    "<extra_id_81>",
    "<extra_id_82>",
    "<extra_id_83>",
    "<extra_id_84>",
    "<extra_id_85>",
    "<extra_id_86>",
    "<extra_id_87>",
    "<extra_id_88>",
    "<extra_id_89>",
    "<extra_id_90>",
    "<extra_id_91>",
    "<extra_id_92>",
    "<extra_id_93>",
    "<extra_id_94>",
    "<extra_id_95>",
    "<extra_id_96>",
    "<extra_id_97>",
    "<extra_id_98>",
    "<extra_id_99>"
  ],
  "eos_token": "</s>",
  "extra_ids": 100,
  "model_max_length": 512,
  "pad_token": "<pad>",
  "processor_class": "Blip2Processor",
  "sp_model_kwargs": {},
  "special_tokens_map_file": "/home/arthur_huggingface_co/.cache/huggingface/hub/models--google--t5-v1_1-small/snapshots/fb7e6cba609f7bab11c614294bc04f82f613c7b1/special_tokens_map.json",
  "tokenizer_class": "T5Tokenizer",
  "unk_token": "<unk>"
}
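special_tokens_map.json, spiece.model, tokenizer.json and tokenizer_config.json together describe the T5 tokenizer: a SentencePiece vocabulary plus 100 `<extra_id_*>` sentinel tokens and a 512-token model_max_length. A short sketch, assuming Hub access and the repo id from the README, of loading it and checking those fields:

```python
# Sketch: load the tokenizer described by the files above and verify a few fields.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Salesforce/blip2-flan-t5-xxl")

print(tokenizer.model_max_length)                                     # 512
print(tokenizer.pad_token, tokenizer.eos_token, tokenizer.unk_token)  # <pad> </s> <unk>
print(len(tokenizer.additional_special_tokens))                       # 100 sentinel tokens
```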