fix compatibility issue for transformers 4.46+
Browse files- configuration_intern_vit.py +1 -0
- configuration_internvl_chat.py +3 -3
- conversation.py +15 -17
- modeling_intern_vit.py +1 -0
- modeling_internvl_chat.py +6 -7
configuration_intern_vit.py
CHANGED
@@ -3,6 +3,7 @@
|
|
3 |
# Copyright (c) 2024 OpenGVLab
|
4 |
# Licensed under The MIT License [see LICENSE for details]
|
5 |
# --------------------------------------------------------
|
|
|
6 |
import os
|
7 |
from typing import Union
|
8 |
|
|
|
3 |
# Copyright (c) 2024 OpenGVLab
|
4 |
# Licensed under The MIT License [see LICENSE for details]
|
5 |
# --------------------------------------------------------
|
6 |
+
|
7 |
import os
|
8 |
from typing import Union
|
9 |
|
configuration_internvl_chat.py
CHANGED
@@ -47,12 +47,12 @@ class InternVLChatConfig(PretrainedConfig):
|
|
47 |
logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')
|
48 |
|
49 |
self.vision_config = InternVisionConfig(**vision_config)
|
50 |
-
if llm_config['architectures'][0] == 'LlamaForCausalLM':
|
51 |
self.llm_config = LlamaConfig(**llm_config)
|
52 |
-
elif llm_config['architectures'][0] == 'Phi3ForCausalLM':
|
53 |
self.llm_config = Phi3Config(**llm_config)
|
54 |
else:
|
55 |
-
raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
|
56 |
self.use_backbone_lora = use_backbone_lora
|
57 |
self.use_llm_lora = use_llm_lora
|
58 |
self.select_layer = select_layer
|
|
|
47 |
logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')
|
48 |
|
49 |
self.vision_config = InternVisionConfig(**vision_config)
|
50 |
+
if llm_config.get('architectures')[0] == 'LlamaForCausalLM':
|
51 |
self.llm_config = LlamaConfig(**llm_config)
|
52 |
+
elif llm_config.get('architectures')[0] == 'Phi3ForCausalLM':
|
53 |
self.llm_config = Phi3Config(**llm_config)
|
54 |
else:
|
55 |
+
raise ValueError('Unsupported architecture: {}'.format(llm_config.get('architectures')[0]))
|
56 |
self.use_backbone_lora = use_backbone_lora
|
57 |
self.use_llm_lora = use_llm_lora
|
58 |
self.select_layer = select_layer
|
conversation.py
CHANGED
@@ -3,11 +3,13 @@ Conversation prompt templates.
|
|
3 |
|
4 |
We kindly request that you import fastchat instead of copying this file if you wish to use it.
|
5 |
If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
|
|
|
|
|
6 |
"""
|
7 |
|
8 |
import dataclasses
|
9 |
from enum import IntEnum, auto
|
10 |
-
from typing import
|
11 |
|
12 |
|
13 |
class SeparatorStyle(IntEnum):
|
@@ -344,12 +346,6 @@ register_conv_template(
|
|
344 |
roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
|
345 |
sep_style=SeparatorStyle.MPT,
|
346 |
sep='<|im_end|>',
|
347 |
-
stop_token_ids=[
|
348 |
-
2,
|
349 |
-
6,
|
350 |
-
7,
|
351 |
-
8,
|
352 |
-
],
|
353 |
stop_str='<|endoftext|>',
|
354 |
)
|
355 |
)
|
@@ -365,11 +361,6 @@ register_conv_template(
|
|
365 |
roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
|
366 |
sep_style=SeparatorStyle.MPT,
|
367 |
sep='<|im_end|>',
|
368 |
-
stop_token_ids=[
|
369 |
-
2,
|
370 |
-
92543,
|
371 |
-
92542
|
372 |
-
]
|
373 |
)
|
374 |
)
|
375 |
|
@@ -384,10 +375,17 @@ register_conv_template(
|
|
384 |
roles=('<|user|>\n', '<|assistant|>\n'),
|
385 |
sep_style=SeparatorStyle.MPT,
|
386 |
sep='<|end|>',
|
387 |
-
|
388 |
-
|
389 |
-
|
390 |
-
|
391 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
392 |
)
|
393 |
)
|
|
|
3 |
|
4 |
We kindly request that you import fastchat instead of copying this file if you wish to use it.
|
5 |
If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
|
6 |
+
|
7 |
+
Modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
|
8 |
"""
|
9 |
|
10 |
import dataclasses
|
11 |
from enum import IntEnum, auto
|
12 |
+
from typing import Dict, List, Tuple, Union
|
13 |
|
14 |
|
15 |
class SeparatorStyle(IntEnum):
|
|
|
346 |
roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
|
347 |
sep_style=SeparatorStyle.MPT,
|
348 |
sep='<|im_end|>',
|
|
|
|
|
|
|
|
|
|
|
|
|
349 |
stop_str='<|endoftext|>',
|
350 |
)
|
351 |
)
|
|
|
361 |
roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
|
362 |
sep_style=SeparatorStyle.MPT,
|
363 |
sep='<|im_end|>',
|
|
|
|
|
|
|
|
|
|
|
364 |
)
|
365 |
)
|
366 |
|
|
|
375 |
roles=('<|user|>\n', '<|assistant|>\n'),
|
376 |
sep_style=SeparatorStyle.MPT,
|
377 |
sep='<|end|>',
|
378 |
+
)
|
379 |
+
)
|
380 |
+
|
381 |
+
|
382 |
+
register_conv_template(
|
383 |
+
Conversation(
|
384 |
+
name='internvl2_5',
|
385 |
+
system_template='<|im_start|>system\n{system_message}',
|
386 |
+
system_message='你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
|
387 |
+
roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
|
388 |
+
sep_style=SeparatorStyle.MPT,
|
389 |
+
sep='<|im_end|>\n',
|
390 |
)
|
391 |
)
|
modeling_intern_vit.py
CHANGED
@@ -3,6 +3,7 @@
|
|
3 |
# Copyright (c) 2024 OpenGVLab
|
4 |
# Licensed under The MIT License [see LICENSE for details]
|
5 |
# --------------------------------------------------------
|
|
|
6 |
from typing import Optional, Tuple, Union
|
7 |
|
8 |
import torch
|
|
|
3 |
# Copyright (c) 2024 OpenGVLab
|
4 |
# Licensed under The MIT License [see LICENSE for details]
|
5 |
# --------------------------------------------------------
|
6 |
+
|
7 |
from typing import Optional, Tuple, Union
|
8 |
|
9 |
import torch
|
modeling_internvl_chat.py
CHANGED
@@ -3,8 +3,9 @@
|
|
3 |
# Copyright (c) 2024 OpenGVLab
|
4 |
# Licensed under The MIT License [see LICENSE for details]
|
5 |
# --------------------------------------------------------
|
|
|
6 |
import warnings
|
7 |
-
from typing import
|
8 |
|
9 |
import torch.utils.checkpoint
|
10 |
import transformers
|
@@ -237,7 +238,7 @@ class InternVLChatModel(PreTrainedModel):
|
|
237 |
model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
|
238 |
input_ids = model_inputs['input_ids'].to(self.device)
|
239 |
attention_mask = model_inputs['attention_mask'].to(self.device)
|
240 |
-
eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
|
241 |
generation_config['eos_token_id'] = eos_token_id
|
242 |
generation_output = self.generate(
|
243 |
pixel_values=pixel_values,
|
@@ -246,7 +247,7 @@ class InternVLChatModel(PreTrainedModel):
|
|
246 |
**generation_config
|
247 |
)
|
248 |
responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
|
249 |
-
responses = [response.split(template.sep)[0].strip() for response in responses]
|
250 |
return responses
|
251 |
|
252 |
def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
|
@@ -265,7 +266,7 @@ class InternVLChatModel(PreTrainedModel):
|
|
265 |
|
266 |
template = get_conv_template(self.template)
|
267 |
template.system_message = self.system_message
|
268 |
-
eos_token_id = tokenizer.convert_tokens_to_ids(template.sep)
|
269 |
|
270 |
history = [] if history is None else history
|
271 |
for (old_question, old_answer) in history:
|
@@ -294,7 +295,7 @@ class InternVLChatModel(PreTrainedModel):
|
|
294 |
**generation_config
|
295 |
)
|
296 |
response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
|
297 |
-
response = response.split(template.sep)[0].strip()
|
298 |
history.append((question, response))
|
299 |
if return_history:
|
300 |
return response, history
|
@@ -314,7 +315,6 @@ class InternVLChatModel(PreTrainedModel):
|
|
314 |
visual_features: Optional[torch.FloatTensor] = None,
|
315 |
generation_config: Optional[GenerationConfig] = None,
|
316 |
output_hidden_states: Optional[bool] = None,
|
317 |
-
return_dict: Optional[bool] = None,
|
318 |
**generate_kwargs,
|
319 |
) -> torch.LongTensor:
|
320 |
|
@@ -342,7 +342,6 @@ class InternVLChatModel(PreTrainedModel):
|
|
342 |
attention_mask=attention_mask,
|
343 |
generation_config=generation_config,
|
344 |
output_hidden_states=output_hidden_states,
|
345 |
-
return_dict=return_dict,
|
346 |
use_cache=True,
|
347 |
**generate_kwargs,
|
348 |
)
|
|
|
3 |
# Copyright (c) 2024 OpenGVLab
|
4 |
# Licensed under The MIT License [see LICENSE for details]
|
5 |
# --------------------------------------------------------
|
6 |
+
|
7 |
import warnings
|
8 |
+
from typing import List, Optional, Tuple, Union
|
9 |
|
10 |
import torch.utils.checkpoint
|
11 |
import transformers
|
|
|
238 |
model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
|
239 |
input_ids = model_inputs['input_ids'].to(self.device)
|
240 |
attention_mask = model_inputs['attention_mask'].to(self.device)
|
241 |
+
eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
|
242 |
generation_config['eos_token_id'] = eos_token_id
|
243 |
generation_output = self.generate(
|
244 |
pixel_values=pixel_values,
|
|
|
247 |
**generation_config
|
248 |
)
|
249 |
responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
|
250 |
+
responses = [response.split(template.sep.strip())[0].strip() for response in responses]
|
251 |
return responses
|
252 |
|
253 |
def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
|
|
|
266 |
|
267 |
template = get_conv_template(self.template)
|
268 |
template.system_message = self.system_message
|
269 |
+
eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
|
270 |
|
271 |
history = [] if history is None else history
|
272 |
for (old_question, old_answer) in history:
|
|
|
295 |
**generation_config
|
296 |
)
|
297 |
response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
|
298 |
+
response = response.split(template.sep.strip())[0].strip()
|
299 |
history.append((question, response))
|
300 |
if return_history:
|
301 |
return response, history
|
|
|
315 |
visual_features: Optional[torch.FloatTensor] = None,
|
316 |
generation_config: Optional[GenerationConfig] = None,
|
317 |
output_hidden_states: Optional[bool] = None,
|
|
|
318 |
**generate_kwargs,
|
319 |
) -> torch.LongTensor:
|
320 |
|
|
|
342 |
attention_mask=attention_mask,
|
343 |
generation_config=generation_config,
|
344 |
output_hidden_states=output_hidden_states,
|
|
|
345 |
use_cache=True,
|
346 |
**generate_kwargs,
|
347 |
)
|