huu-ontocord committed
Commit 9e922eb
Parent(s): 74d319b
Create image_embedding_phi3_v.py
Files changed: image_embedding_phi3_v.py (+301, -0)
image_embedding_phi3_v.py
ADDED
@@ -0,0 +1,301 @@
# coding=utf-8
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import torch
import torch.nn as nn
from transformers import CLIPVisionModel, PretrainedConfig
from transformers import CLIPVisionConfig
from transformers.utils import logging
from datetime import datetime

logger = logging.get_logger(__name__)

CLIP_VIT_LARGE_PATCH14_336_CONFIG = CLIPVisionConfig(
    attention_dropout=0.0,
    dropout=0.0,
    hidden_act="quick_gelu",
    hidden_size=1024,
    image_size=336,
    initializer_factor=1.0,
    initializer_range=0.02,
    intermediate_size=4096,
    layer_norm_eps=1e-05,
    num_attention_heads=16,
    num_channels=3,
    num_hidden_layers=24,
    patch_size=14,
    projection_dim=768
)
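# Editor note (illustrative, not part of the original commit): with image_size=336
# and patch_size=14 above, the CLIP tower produces a (336 // 14) ** 2 = 24 x 24 = 576
# patch grid (577 hidden states including the CLS token), each of width hidden_size=1024.
# That 1024 is what the surrounding model config is expected to supply as
# img_processor['image_dim_out'] in the class below.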
class Phi3ImageEmbedding(nn.Module):
    """Phi3 Image embedding."""

    def __init__(self, config: PretrainedConfig, wte=None, **kwargs) -> None:
        super().__init__()

        # n_embed or hidden_size
        hidden_size = config.n_embd if hasattr(config, 'n_embd') else config.hidden_size
        if hasattr(config, 'embd_pdrop') or hasattr(config, 'embed_pdrop'):
            embd_drop = config.embd_pdrop if hasattr(config, 'embd_pdrop') else config.embed_pdrop
            self.drop = nn.Dropout(embd_drop)
        else:
            self.drop = None

        self.wte = wte

        if isinstance(config.img_processor, dict) and config.img_processor.get('name', None) == 'clip_vision_model':
            assert 'model_name' in config.img_processor, 'model_name must be provided for CLIPVisionModel'
            assert 'image_dim_out' in config.img_processor, 'image_dim_out must be provided for CLIPVisionModel'
            assert 'num_img_tokens' in config.img_processor, 'num_img_tokens must be provided for CLIPVisionModel'
            assert config.img_processor['model_name'] == 'openai/clip-vit-large-patch14-336'
            clip_config = CLIP_VIT_LARGE_PATCH14_336_CONFIG
            self.img_processor = CLIPVisionModel(clip_config)
            image_dim_out = config.img_processor['image_dim_out']
            self.num_img_tokens = config.img_processor['num_img_tokens']
        else:
            raise NotImplementedError(f'img_processor = {config.img_processor}, not implemented')

        self.image_dim_out = image_dim_out
        self.img_sizes = None

        # global_gn and sub_gn for hd transform, serves as line separator
        self.use_hd_transform = kwargs.get('use_hd_transform', False)
        self.with_learnable_separator = kwargs.get('with_learnable_separator', False)
        self.hd_transform_order = kwargs.get('hd_transform_order', 'glb_sub')
        # with_hd_transform and with_learnable_separator should have same value
        assert self.use_hd_transform == self.with_learnable_separator, 'use_hd_transform and with_learnable_separator should have same value'
        if self.with_learnable_separator:
            assert self.use_hd_transform, 'learnable separator is only for hd transform'
            # 1024 * 4, merge spatial to channel dimension
            self.glb_GN = nn.Parameter(torch.zeros([1, 1, self.image_dim_out * 4]))
            self.sub_GN = nn.Parameter(torch.zeros([1, 1, 1, self.image_dim_out * 4]))
            logger.info(f'learnable separator enabled for hd transform, hd_transform_order = {self.hd_transform_order}')

        projection_cls = kwargs.get('projection_cls', 'linear')
        if projection_cls == 'linear':
            self.img_projection = nn.Linear(image_dim_out, hidden_size)
        elif projection_cls == 'mlp' and self.use_hd_transform:
            dim_projection = hidden_size
            depth = 2
            layers = [nn.Linear(image_dim_out * 4, dim_projection)]
            for _ in range(1, depth):
                layers.extend([nn.GELU(),
                               nn.Linear(dim_projection, dim_projection)])
            self.img_projection = nn.Sequential(*layers)
        elif projection_cls == 'mlp':
            dim_projection = hidden_size
            depth = 2
            layers = [nn.Linear(image_dim_out, dim_projection)]
            for _ in range(1, depth):
                layers.extend([nn.GELU(),
                               nn.Linear(dim_projection, dim_projection)])
            self.img_projection = nn.Sequential(*layers)
        else:
            raise NotImplementedError(f'projection_cls = {projection_cls}, not implemented')

        self.vocab_size = config.vocab_size
        self.img_features = None

        if isinstance(config.img_processor, dict):
            self.layer_idx = config.img_processor.get('layer_idx', -2)
            self.type_feature = config.img_processor.get('type_feature', 'patch')
        else:
            self.layer_idx = -2
            self.type_feature = 'patch'
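    # Editor sketch: a hypothetical way to construct this module. The config values
    # below are illustrative assumptions, not values taken from this file.
    #
    #   cfg = PretrainedConfig(
    #       hidden_size=3072,
    #       vocab_size=32064,
    #       img_processor={'name': 'clip_vision_model',
    #                      'model_name': 'openai/clip-vit-large-patch14-336',
    #                      'image_dim_out': 1024,
    #                      'num_img_tokens': 144},
    #   )
    #   embed = Phi3ImageEmbedding(cfg,
    #                              wte=nn.Embedding(cfg.vocab_size, cfg.hidden_size),
    #                              use_hd_transform=True,
    #                              with_learnable_separator=True,
    #                              projection_cls='mlp')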
    def set_img_features(self, img_features: torch.FloatTensor) -> None:
        self.img_features = img_features

    def set_img_sizes(self, img_sizes: torch.LongTensor) -> None:
        self.img_sizes = img_sizes

    def get_img_features(self, img_embeds: torch.FloatTensor) -> torch.FloatTensor:
        LAYER_IDX = self.layer_idx
        TYPE_FEATURE = self.type_feature

        img_processor_output = self.img_processor(img_embeds, output_hidden_states=True)
        img_feature = img_processor_output.hidden_states[LAYER_IDX]

        if TYPE_FEATURE == "patch":
            patch_feature = img_feature[:, 1:]
            return patch_feature

        if TYPE_FEATURE == "cls_patch":
            return img_feature

        raise NotImplementedError
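    # Editor note (illustrative): for a batch of N 336x336 crops, img_embeds is
    # (N, 3, 336, 336); with the defaults type_feature='patch' and layer_idx=-2,
    # the returned features are (N, 576, 1024): the CLS token is dropped and the
    # penultimate hidden layer of the CLIP tower is used.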
    def forward(self, input_ids: torch.LongTensor, pixel_values: torch.FloatTensor, image_sizes=None) -> torch.FloatTensor:

        MAX_INPUT_ID = int(1e9)
        img_embeds = pixel_values
        img_sizes = image_sizes

        if self.img_features is not None:
            img_embeds = self.img_features.clone()
            self.img_features = None

        if self.img_sizes is not None:
            img_sizes = self.img_sizes

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        with torch.no_grad():
            positions = torch.nonzero((input_ids < 0) & (input_ids > -MAX_INPUT_ID), as_tuple=False)
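        # Editor note (assumption, illustrative): the companion processor is assumed
        # to mark image positions with runs of negative placeholder ids (image k as
        # repeated -k), so `positions` collects the (row, column) indices that are
        # later overwritten with projected image features, and g_values below recovers
        # the image index via abs().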
        select = False
        hd_transform = False  # initialized here so the `if hd_transform:` check below is defined even when the hd path is not taken

        if isinstance(self.img_projection, nn.Sequential):
            target_device = self.img_projection[0].bias.device
            target_dtype = self.img_projection[0].bias.dtype
        else:  # It's a single nn.Linear layer
            target_device = self.img_projection.bias.device
            target_dtype = self.img_projection.bias.dtype

        if len(positions.tolist()) > 0:
            with torch.no_grad():
                g_values = abs(input_ids[positions[:, 0], positions[:, 1]])

            if self.use_hd_transform and img_sizes is not None and len(img_sizes):
                hd_transform = True
                assert img_embeds.ndim == 5, f'img_embeds size: {img_embeds.size()}, expect 5D tensor for hd transform'
                # img_embeds: (num_images, max_num_crops, 3, H, W)
                # img_sizes: (num_images, 2).view(1, -1)

                start_time = datetime.now()
                bs = img_embeds.shape[0]
                # Nx(HW)xC
                img_features = self.get_img_features(img_embeds.flatten(0, 1))
                base_feat_height = base_feat_width = int(img_features.shape[1] ** 0.5)

                assert base_feat_height == 24 and base_feat_width == 24, f'base_feat_height: {base_feat_height}, base_feat_width: {base_feat_width}, expect 24x24 features for hd transform'

                # bs x max_num_crops x (24x24) x C
                img_features = img_features.view(bs, -1, base_feat_height * base_feat_width, self.image_dim_out)
                C = self.image_dim_out
                H = base_feat_height

                output_imgs = []
                output_len = []
                # training is tensor, inference is list
                if isinstance(img_sizes, torch.Tensor):
                    img_sizes = img_sizes.view(-1, 2)
                for _bs in range(bs):
                    h, w = img_sizes[_bs]
                    h = h // 336
                    w = w // 336
                    B_ = h * w

                    # 1 x (24x24) x 1024
                    global_img_feature = img_features[_bs, :1]

                    # 1 x 12 x 12 x 4096
                    glb_img = global_img_feature.reshape(1, H, H, C).reshape(1, H // 2, 2, H // 2, 2, C).contiguous().permute(0, 1, 3, 2, 4, 5).reshape(1, H // 2, H // 2, 4 * C).contiguous()
                    temp_glb_GN = self.sub_GN.repeat(1, H // 2, 1, 1)

                    # 1 x 156 x 4096
                    glb_img = torch.cat([glb_img, temp_glb_GN], dim=2).reshape(1, -1, 4 * C)
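                    # Editor note (illustrative): each 24x24x1024 crop is folded 2x2
                    # spatial-to-channel into 12x12x4096; appending sub_GN as one extra
                    # column gives 12 rows of (12 + 1) = 156 tokens, which is the
                    # "1 x 156 x 4096" shape noted above for the global crop.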
                    # (max_num_crops-1) x (12x12) x C
                    sub_img = img_features[_bs, 1:]
                    # 16x574x1024
                    # get rid of padding sub_img
                    sub_img = sub_img[:B_]

                    # (num_crops, 12, 2, 12, 2, 1024) -> (num_crops, 12, 12, 2, 2, 1024) -> (num_crops, 12*12, 4*1024)
                    sub_img = sub_img.reshape(B_, H, H, C).reshape(B_, H // 2, 2, H // 2, 2, C).contiguous().permute(0, 1, 3, 2, 4, 5).reshape(B_, -1, 4 * C).contiguous()
                    sub_img = sub_img.reshape(1, h, w, 12, 12, -1).permute(0, 1, 3, 2, 4, 5).reshape(1, h * 12, w * 12, 4 * C)
                    temp_sub_GN = self.sub_GN.repeat(1, h * 12, 1, 1)
                    sub_img = torch.cat([sub_img, temp_sub_GN], dim=2).reshape(1, -1, 4 * C)
                    # (1, num_img_tokens, 1024*4)

                    # glb + sub
                    if self.hd_transform_order == 'glb_sub':
                        output_imgs.append(torch.cat([glb_img, self.glb_GN, sub_img], dim=1))
                    elif self.hd_transform_order == 'sub_glb':
                        output_imgs.append(torch.cat([sub_img, self.glb_GN, glb_img], dim=1))
                    else:
                        raise NotImplementedError(f'hd_transform_order = {self.hd_transform_order}, not implemented')

                    temp_len = int((h * w + 1) * 144 + 1 + (h + 1) * 12)
                    assert temp_len == output_imgs[-1].shape[1], f'temp_len: {temp_len}, output_imgs[-1].shape[1]: {output_imgs[-1].shape[1]}'
                    output_len.append(temp_len)
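                    # Editor note (worked example, illustrative): for an image padded
                    # to 672 x 1008, h = 2 and w = 3 crops, so
                    #   temp_len = (2*3 + 1) * 144 + 1 + (2 + 1) * 12
                    #            = 7 * 144 + 1 + 36 = 1045 image tokens:
                    # 144 merged tokens for each of the 7 crops (global + 6 sub-crops),
                    # one glb_GN separator, 12 sub_GN row separators for the global crop,
                    # and 12 * h = 24 sub_GN row separators for the sub-crop grid.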
                num_img_tokens = output_len
                img_set_tensor = []
                for _output_img in output_imgs:
                    img_feature_proj = self.img_projection(_output_img.to(target_device).to(target_dtype))
                    img_set_tensor.append(img_feature_proj)
                logger.info(f'img_embeds size: {img_embeds.size()}, image sizes: {img_sizes} loading time {datetime.now() - start_time}')
            elif img_embeds.ndim == 4:
                selected_g_values = g_values[::self.num_img_tokens]
                assert len(img_embeds) == len(selected_g_values), f'img_embeds size: {img_embeds.size()}, selected_g_values size: {len(selected_g_values)}, selected_g_value {selected_g_values}'
                start_time = datetime.now()
                tt = (
                    self.get_img_features(img_embeds)
                    .to(target_device)
                    .to(target_dtype)
                    .reshape(-1, self.image_dim_out)
                )
                logger.info(f'img_embeds size: {img_embeds.size()}, loading time {datetime.now() - start_time}')
                img_set_tensor = self.img_projection(tt)  # adapted visual features.
            elif img_embeds.ndim == 3:
                selected_g_values = g_values[::self.num_img_tokens]
                assert len(img_embeds) == len(selected_g_values), f'img_embeds size: {img_embeds.size()}, selected_g_values size: {len(selected_g_values)}, selected_g_value {selected_g_values}'
                tt = (
                    img_embeds
                    .to(target_device)
                    .to(target_dtype)
                    .view(-1, self.image_dim_out)
                )
                img_set_tensor = self.img_projection(tt)  # adapted visual features.
            else:
                raise NotImplementedError
            select = True

        with torch.no_grad():
            input_ids.clamp_min_(0).clamp_max_(self.vocab_size)

        hidden_states = self.wte(input_ids)

        if select:
            if hd_transform:
                idx = 0
                for i, cnt in enumerate(num_img_tokens):
                    hidden_states[positions[idx, 0], positions[idx, 1] : positions[idx, 1] + cnt] = (
                        img_set_tensor[i]
                        .to(hidden_states.dtype)
                        .to(hidden_states.device)
                    )
                    idx += cnt
            else:
                idx = 0
                assert len(selected_g_values) * self.num_img_tokens == len(img_set_tensor), f'len(selected_g_values) * self.num_img_tokens = {len(selected_g_values) * self.num_img_tokens}, len(img_set_tensor) = {len(img_set_tensor)}'
                for i, g in enumerate(selected_g_values):
                    cnt = self.num_img_tokens
                    hidden_states[positions[idx, 0], positions[idx, 1] : positions[idx, 1] + cnt] = (
                        img_set_tensor[i * cnt : (i + 1) * cnt]
                        .to(hidden_states.dtype)
                        .to(hidden_states.device)
                    )
                    idx += cnt

        if self.drop is not None:
            hidden_states = self.drop(hidden_states)

        return hidden_states
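
Usage sketch (editor addition, not part of the commit): a minimal, hypothetical way to exercise the module above with random weights. The config values, tensor shapes, and the convention of marking image positions with runs of -1 are illustrative assumptions, not values taken from this file.

    import torch
    import torch.nn as nn
    from transformers import PretrainedConfig
    from image_embedding_phi3_v import Phi3ImageEmbedding  # assuming this file is importable under its own name

    # Hypothetical model config; extra kwargs become attributes on PretrainedConfig.
    cfg = PretrainedConfig(
        hidden_size=3072, vocab_size=32064,
        img_processor={'name': 'clip_vision_model',
                       'model_name': 'openai/clip-vit-large-patch14-336',
                       'image_dim_out': 1024, 'num_img_tokens': 144},
    )
    embed = Phi3ImageEmbedding(cfg, wte=nn.Embedding(cfg.vocab_size, cfg.hidden_size),
                               use_hd_transform=True, with_learnable_separator=True,
                               projection_cls='mlp')

    # One 672x336 image -> a 2x1 grid of sub-crops plus one global crop, so
    # (2*1 + 1)*144 + 1 + (2 + 1)*12 = 469 image tokens.
    num_img_tokens = 469
    input_ids = torch.cat([torch.tensor([[1]]),                  # BOS
                           torch.full((1, num_img_tokens), -1),  # assumed placeholders for image 1
                           torch.randint(2, 32000, (1, 16))], dim=1)
    pixel_values = torch.randn(1, 3, 3, 336, 336)                # (num_images, crops, C, H, W)
    image_sizes = torch.tensor([[672, 336]])

    hidden_states = embed(input_ids, pixel_values, image_sizes)  # (1, seq_len, 3072)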