Update modeling_GOT.py
modeling_GOT.py  +1 -1
@@ -237,7 +237,6 @@ class GOTQwenModel(Qwen2Model):
 
             for image in images:
                 P, C, H, W = image.shape
-                print(image.shape)
                 if P == 1:
                     with torch.set_grad_enabled(False):
                         cnn_feature = vision_tower_high(image)
@@ -250,6 +249,7 @@ class GOTQwenModel(Qwen2Model):
                     image_patches_features = []
                     for image_patch in image_patches:
                         image_p = torch.stack([image_patch])
+                        print(image_p.shape)
                         with torch.set_grad_enabled(False):
                             cnn_feature_p = vision_tower_high(image_p)
                             cnn_feature_p = cnn_feature_p.flatten(2).permute(0, 2, 1)
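
For context, the relocated debug print now reports the per-patch batch shape fed to the vision tower rather than the whole multi-patch tensor. Below is a minimal standalone sketch of that path, not the model code itself; the tensor sizes (3x1024x1024) and the helper name show_patch_shapes are illustrative assumptions, and the vision tower call is omitted.

# Minimal sketch (assumed shapes, hypothetical helper): what the relocated print reports.
import torch

def show_patch_shapes(images):
    for image in images:
        P, C, H, W = image.shape
        if P == 1:
            # single-crop path: the whole (1, C, H, W) tensor goes to the vision tower
            print("single image:", image.shape)
        else:
            # multi-crop path: each patch is re-batched to (1, C, H, W) before encoding
            for image_patch in torch.unbind(image):
                image_p = torch.stack([image_patch])
                print("patch batch:", image_p.shape)  # what print(image_p.shape) now shows

# Example: one single-crop image and one image split into 4 crops.
show_patch_shapes([torch.randn(1, 3, 1024, 1024), torch.randn(4, 3, 1024, 1024)])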