tarekziade committed
Commit b7cc1ca
1 Parent(s): 07b911f

Upload 20 files
README.md CHANGED
@@ -1,3 +1,78 @@
  ---
+ tags:
+ - image-to-text
+ - image-captioning
  license: apache-2.0
+ metrics:
+ - rouge
+ datasets:
+ - Mozilla/flickr30k-transformed-captions
+ widget:
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/savanna.jpg
+   example_title: Savanna
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/football-match.jpg
+   example_title: Football Match
+ - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/airport.jpg
+   example_title: Airport
+ base_model:
+ - google/vit-base-patch16-224-in21k
+
+ model-index:
+ - name: mozilla/distilvit
+   results:
+   - task:
+       type: image-to-text
+       name: Image To Text
+     dataset:
+       name: Mozilla/flickr30k-transformed-captions
+       type: Mozilla/flickr30k-transformed-captions
+     metrics:
+     - name: ROUGE-1
+       type: rouge
+       value: 43.006
+       verified: true
+     - name: ROUGE-2
+       type: rouge
+       value: 16.9939
+       verified: true
+     - name: ROUGE-L
+       type: rouge
+       value: 38.8923
+       verified: true
+     - name: ROUGE-LSUM
+       type: rouge
+       value: 38.8877
+       verified: true
+     - name: loss
+       type: loss
+       value: 0.19939416646957397
+     - name: gen_len
+       type: gen_len
+       value: 11.327256736227712
+       verified: true
  ---
+
+ # distilvit
+
+ This model is a work in progress. It is a fine-tuned version of the following base models:
+
+ - a ViT model for the image encoder: https://huggingface.co/google/vit-base-patch16-224-in21k
+ - a distilled GPT-2 model for the text decoder: https://huggingface.co/distilbert/distilgpt2
+
+ This model was trained on:
+
+ - Flickr30k: https://huggingface.co/datasets/nlphuji/flickr30k
+ - COCO 2017: https://cocodataset.org
+
+ You can get that checkpoint at commit 3083a3cef6e3c8dd90df3f088074bbe836b0f403.
+
+ It was then further fine-tuned on:
+
+ - [Flickr30k debiased](https://huggingface.co/datasets/Mozilla/flickr30k-transformed-captions)
+ - [DocOrNot](https://huggingface.co/datasets/Mozilla/docornot)
+ - [Alt Text Validation](https://huggingface.co/datasets/Mozilla/alt-text-validation)
+
+ For the latter, the dataset was annotated by our team to correct the alt text generated by the model,
+ using the [checkvite tool](https://github.com/mozilla/checkvite).
+
+ You can find the code used to create the model here: https://github.com/mozilla/distilvit
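
To try the uploaded checkpoint, here is a minimal usage sketch (not part of this commit; it assumes the model is published under the `Mozilla/distilvit` repo id listed in the model-index above):

```python
# Caption one of the widget images with the generic "image-to-text" pipeline.
from transformers import pipeline

captioner = pipeline("image-to-text", model="Mozilla/distilvit")
result = captioner(
    "https://huggingface.co/datasets/mishig/sample_images/resolve/main/savanna.jpg"
)
print(result[0]["generated_text"])
```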
config.json ADDED
@@ -0,0 +1,183 @@
+ {
+   "_name_or_path": "C:\\Users\\tarek\\Dev\\distilvit\\distilvit\\..\\mozilla/distilvit+fine-tuned",
+   "architectures": [
+     "VisionEncoderDecoderModel"
+   ],
+   "decoder": {
+     "_name_or_path": "distilbert/distilgpt2",
+     "_num_labels": 1,
+     "activation_function": "gelu_new",
+     "add_cross_attention": true,
+     "architectures": [
+       "GPT2LMHeadModel"
+     ],
+     "attn_pdrop": 0.1,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 50256,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "embd_pdrop": 0.1,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 50256,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "id2label": {
+       "0": "LABEL_0"
+     },
+     "initializer_range": 0.02,
+     "is_decoder": true,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0
+     },
+     "layer_norm_epsilon": 1e-05,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "gpt2",
+     "n_ctx": 1024,
+     "n_embd": 768,
+     "n_head": 12,
+     "n_inner": null,
+     "n_layer": 6,
+     "n_positions": 1024,
+     "no_repeat_ngram_size": 0,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "reorder_and_upcast_attn": false,
+     "repetition_penalty": 1.0,
+     "resid_pdrop": 0.1,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "scale_attn_by_inverse_layer_idx": false,
+     "scale_attn_weights": true,
+     "sep_token_id": null,
+     "summary_activation": null,
+     "summary_first_dropout": 0.1,
+     "summary_proj_to_labels": true,
+     "summary_type": "cls_index",
+     "summary_use_proj": true,
+     "suppress_tokens": null,
+     "task_specific_params": {
+       "text-generation": {
+         "do_sample": true,
+         "max_length": 50
+       }
+     },
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 50257
+   },
+   "decoder_start_token_id": 50256,
+   "encoder": {
+     "_name_or_path": "google/vit-base-patch16-224-in21k",
+     "add_cross_attention": false,
+     "architectures": [
+       "ViTModel"
+     ],
+     "attention_probs_dropout_prob": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "encoder_stride": 16,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.0,
+     "hidden_size": 768,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 224,
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-12,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "vit",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 12,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 16,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "qkv_bias": true,
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "typical_p": 1.0,
+     "use_bfloat16": false
+   },
+   "eos_token_id": 50256,
+   "is_encoder_decoder": true,
+   "model_type": "vision-encoder-decoder",
+   "pad_token_id": 50256,
+   "tie_word_embeddings": false,
+   "transformers_version": "4.33.2"
+ }
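
The config above wires a ViT encoder into a DistilGPT-2 decoder. As a sketch (same `Mozilla/distilvit` repo-id assumption as above), the two halves can be inspected without downloading the weights:

```python
# VisionEncoderDecoderConfig exposes the nested encoder/decoder configs.
from transformers import VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_pretrained("Mozilla/distilvit")
print(config.encoder.model_type, config.encoder.num_hidden_layers)  # vit 12
print(config.decoder.model_type, config.decoder.n_layer)            # gpt2 6
```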
generation_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "bos_token_id": 50256,
+   "do_sample": true,
+   "early_stopping": true,
+   "eos_token_id": 50256,
+   "max_length": 50,
+   "max_time": 5,
+   "no_repeat_ngram_size": 2,
+   "num_beams": 2,
+   "pad_token_id": 50256,
+   "repetition_penalty": 1.4,
+   "seed": 12,
+   "temperature": 0.8,
+   "top_p": 0.9,
+   "transformers_version": "4.33.2"
+ }
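
These defaults (2 beams with sampling, top_p 0.9, temperature 0.8, repetition penalty 1.4, 2-gram blocking) are picked up by `generate()` automatically. A sketch of loading and overriding them, under the same repo-id assumption:

```python
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("Mozilla/distilvit")
gen.max_new_tokens = 30  # hypothetical override for longer captions
# model.generate(pixel_values, generation_config=gen)
```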
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
onnx/decoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b562d5631d23b71d9417071e8041f3eb80e84122a9bbb66dfdec8d46d91594d4
+ size 385864797
onnx/decoder_model_merged.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:129100f907e7295525d4529083bfb5bdcc206eedce9f9d4a6223131ea8cb8e87
+ size 387342586
onnx/decoder_model_merged_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d636dce4f19d08059cf075c25b75f94f71e5b28081d3be8c3823c07f40c59604
+ size 99759579
onnx/decoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87de04f08ef3a7cec2f3cd71d4de440864ab0a7f4f32ad0764fdb8d26387fe94
+ size 98065763
onnx/decoder_with_past_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3e69a840d8b693b738d31f9bab09434311b1eb022f4c6a89f7c31ae4ac76428
+ size 385864377
onnx/decoder_with_past_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5da506c3668083b54468a4be3d0581c2749abc28c68d4f6f816d7f03548ea977
+ size 98063170
onnx/encoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e06aee9b192045a32c40f0bac2e1304e153f7b7521286a65a478893a582b4087
+ size 343440632
onnx/encoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90563de4250ab0e12730242476b884529a1a825059c36212bd4ae1dac2e87c56
+ size 87038173
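
The `onnx/` directory splits the model into encoder and decoder graphs, each with a quantized variant. A sketch of running them through ONNX Runtime via `optimum` (assuming `optimum[onnxruntime]` is installed and picks up this folder layout):

```python
from optimum.onnxruntime import ORTModelForVision2Seq
from transformers import AutoImageProcessor, AutoTokenizer
from PIL import Image
import requests

repo = "Mozilla/distilvit"  # assumed repo id, as above
model = ORTModelForVision2Seq.from_pretrained(repo)
processor = AutoImageProcessor.from_pretrained(repo)
tokenizer = AutoTokenizer.from_pretrained(repo)

url = "https://huggingface.co/datasets/mishig/sample_images/resolve/main/airport.jpg"
image = Image.open(requests.get(url, stream=True).raw)
pixel_values = processor(images=image, return_tensors="pt").pixel_values
ids = model.generate(pixel_values)
print(tokenizer.batch_decode(ids, skip_special_tokens=True))
```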
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "feature_extractor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
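
This preprocessor resizes inputs to 224x224, rescales pixel values by 1/255 (the `0.00392...` factor), and normalizes each channel with mean and std 0.5, matching the ViT encoder's expectations. A small sketch, under the same repo-id assumption:

```python
from transformers import ViTImageProcessor
from PIL import Image

processor = ViTImageProcessor.from_pretrained("Mozilla/distilvit")
image = Image.new("RGB", (640, 480), "gray")  # placeholder input
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 224, 224])
```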
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c82a8f09d299196b2a3237374d874d008567e0d9d9044c48b24965138d8394e
+ size 730052378
quantize_config.json ADDED
@@ -0,0 +1,125 @@
+ {
+   "per_channel": false,
+   "reduce_range": false,
+   "per_model_config": {
+     "decoder_model": {
+       "op_types": [
+         "Add",
+         "Transpose",
+         "Squeeze",
+         "Range",
+         "Slice",
+         "ConstantOfShape",
+         "MatMul",
+         "Where",
+         "ReduceMean",
+         "Div",
+         "Unsqueeze",
+         "Split",
+         "Cast",
+         "Gemm",
+         "Tanh",
+         "Pow",
+         "Mul",
+         "Gather",
+         "Reshape",
+         "Softmax",
+         "Sqrt",
+         "Shape",
+         "Constant",
+         "Sub",
+         "Concat"
+       ],
+       "weight_type": "QInt8"
+     },
+     "decoder_model_merged": {
+       "op_types": [
+         "Add",
+         "Transpose",
+         "Squeeze",
+         "Range",
+         "Slice",
+         "ConstantOfShape",
+         "MatMul",
+         "Where",
+         "ReduceMean",
+         "Div",
+         "Unsqueeze",
+         "Split",
+         "Cast",
+         "Gemm",
+         "Tanh",
+         "Pow",
+         "Mul",
+         "Gather",
+         "Reshape",
+         "Softmax",
+         "Sqrt",
+         "Shape",
+         "Constant",
+         "Sub",
+         "If",
+         "Concat"
+       ],
+       "weight_type": "QInt8"
+     },
+     "decoder_with_past_model": {
+       "op_types": [
+         "Add",
+         "Transpose",
+         "Squeeze",
+         "Range",
+         "Slice",
+         "ConstantOfShape",
+         "MatMul",
+         "Where",
+         "ReduceMean",
+         "Div",
+         "Unsqueeze",
+         "Split",
+         "Cast",
+         "Gemm",
+         "Tanh",
+         "Pow",
+         "Mul",
+         "Gather",
+         "Reshape",
+         "Softmax",
+         "Sqrt",
+         "Shape",
+         "Constant",
+         "Sub",
+         "Concat"
+       ],
+       "weight_type": "QInt8"
+     },
+     "encoder_model": {
+       "op_types": [
+         "Add",
+         "Transpose",
+         "Equal",
+         "Slice",
+         "Expand",
+         "ConstantOfShape",
+         "MatMul",
+         "Where",
+         "ReduceMean",
+         "Div",
+         "Unsqueeze",
+         "Conv",
+         "Pow",
+         "Mul",
+         "Gather",
+         "Reshape",
+         "Softmax",
+         "Sqrt",
+         "Shape",
+         "Constant",
+         "Sub",
+         "Erf",
+         "Concat"
+       ],
+       "weight_type": "QUInt8"
+     }
+   }
+ }
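
The commit does not record which tool produced the quantized ONNX files, but the settings above (per-tensor, no reduced range, QInt8 weights for the decoder graphs) match ONNX Runtime's dynamic quantizer; a hedged sketch:

```python
from onnxruntime.quantization import QuantType, quantize_dynamic

# Assumption: this mirrors, rather than reproduces, the original command.
quantize_dynamic(
    model_input="onnx/decoder_model.onnx",
    model_output="onnx/decoder_model_quantized.onnx",
    per_channel=False,
    reduce_range=False,
    weight_type=QuantType.QInt8,
)
```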
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1024,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
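
GPT-2 has no dedicated pad token, so the two files above map `bos`/`eos`/`pad`/`unk` all to `<|endoftext|>` (id 50256). A quick check, under the same repo-id assumption:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Mozilla/distilvit")
assert tokenizer.pad_token == tokenizer.eos_token == "<|endoftext|>"
```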
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6124d14554c856f07f64ebbca472baff63ce12555bd850dbac15bdf294e64c5
+ size 4728
vocab.json ADDED
The diff for this file is too large to render. See raw diff