RanchiZhao committed
Commit b4c018c
Parent(s): fd8ff8f
Update modeling_minicpm.py

modeling_minicpm.py (+14 -38)
CHANGED
@@ -48,7 +48,7 @@ from transformers.utils import (
     replace_return_docstrings,
 )
 from transformers.utils.import_utils import is_torch_fx_available
-from .configuration_minicpm import
+from .configuration_minicpm import MiniCPM3Config
 import re
 
 try:
@@ -69,7 +69,7 @@ if is_torch_fx_available():
 
 logger = logging.get_logger(__name__)
 
-_CONFIG_FOR_DOC = "
+_CONFIG_FOR_DOC = "MiniCPM3Config"
 
 
 def _get_unpad_data(attention_mask):
@@ -220,7 +220,6 @@ class MiniCPMLongRoPE(MiniCPMRotaryEmbedding):
         self.scaling_factor = math.sqrt(
             1 + math.log(scale) /
             math.log(self.original_max_position_embeddings))
-        print("using longrope!!!")
         super().__init__(dim, max_position_embeddings, base, device)
 
     def _set_cos_sin_cache(self, seq_len, device, dtype):
@@ -235,8 +234,6 @@ class MiniCPMLongRoPE(MiniCPMRotaryEmbedding):
             torch.outer(t, 1.0 / ext_factors).to(device=device),
             self.inv_freq.to(device=device).to(dtype)
         )
-        # print("??? rope freqs: ", freqs.shape)
-        # print("??? rope freqs: ", freqs)
         # Different from paper, but it uses a different permutation in order to obtain the same calculation
         emb = torch.cat((freqs, freqs), dim=-1)
         self.register_buffer("cos_cached", emb.cos().to(dtype) * self.scaling_factor, persistent=False)
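For readers skimming the two LongRoPE hunks above: the cos/sin caches are multiplied by self.scaling_factor, which the earlier hunk computes as sqrt(1 + log(scale) / log(original_max_position_embeddings)). A minimal sketch of that computation follows; the context lengths are illustrative placeholders, not values taken from this checkpoint's config.

import math

# Placeholder context lengths, chosen only to make `scale` concrete.
original_max_position_embeddings = 4096
extended_max_position_embeddings = 32768
scale = extended_max_position_embeddings / original_max_position_embeddings

# Same formula as MiniCPMLongRoPE.__init__ in the hunk above.
scaling_factor = math.sqrt(1 + math.log(scale) / math.log(original_max_position_embeddings))
print(round(scaling_factor, 3))  # 1.118 -- cos_cached/sin_cached are scaled by this factor
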
@@ -334,7 +331,7 @@ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
 class MiniCPMAttention(nn.Module):
     """Multi-headed attention from 'Attention Is All You Need' paper"""
 
-    def __init__(self, config:
+    def __init__(self, config: MiniCPM3Config, layer_idx: Optional[int] = None):
         super().__init__()
         self.config = config
         self.layer_idx = layer_idx
@@ -601,17 +598,8 @@ class MiniCPMFlashAttention2(MiniCPMAttention):
         kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
 
         cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
-        # print("before rope q shape: ", q_pe.shape)
-        # print("before rope k shape: ", k_pe.shape)
-        # print("before rope q: ", q_pe.transpose(1, 2))
-        # print("before rope k: ", k_pe)
         q_pe, k_pe = apply_rotary_pos_emb(q_pe, k_pe, cos, sin, position_ids)
-
-        # print("after rope k shape: ", k_pe.shape)
-        # print("after rope q: ", q_pe.transpose(1, 2))
-        # print("after rope k: ", k_pe)
-        # exit(1)
-
+
         query_states = k_pe.new_empty(bsz, self.num_heads, q_len, self.q_head_dim)
         query_states[:, :, :, : self.qk_nope_head_dim] = q_nope
         query_states[:, :, :, self.qk_nope_head_dim :] = q_pe
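With the debug prints removed, the remaining logic in this hunk is easier to see: RoPE is applied only to the rotary slice of the query/key, and the full query is then reassembled from its non-rotary and rotary parts. A self-contained shape sketch of that reassembly, using placeholder head dimensions rather than the values from this checkpoint's config:

import torch

# Placeholder sizes; only the split/concat pattern mirrors the hunk above.
bsz, num_heads, q_len = 1, 8, 16
qk_nope_head_dim, qk_rope_head_dim = 64, 32
q_head_dim = qk_nope_head_dim + qk_rope_head_dim

q_nope = torch.randn(bsz, num_heads, q_len, qk_nope_head_dim)  # part without positional encoding
q_pe = torch.randn(bsz, num_heads, q_len, qk_rope_head_dim)    # rotary part, after apply_rotary_pos_emb

query_states = q_pe.new_empty(bsz, num_heads, q_len, q_head_dim)
query_states[:, :, :, :qk_nope_head_dim] = q_nope
query_states[:, :, :, qk_nope_head_dim:] = q_pe
print(query_states.shape)  # torch.Size([1, 8, 16, 96])
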
@@ -637,12 +625,6 @@ class MiniCPMFlashAttention2(MiniCPMAttention):
 
         dropout_rate = self.attention_dropout if self.training else 0.0
 
-        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
-        # therefore the input hidden states gets silently casted in float32. Hence, we need
-        # cast them back in the correct dtype just to be sure everything works as expected.
-        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
-        # in fp32. (DeepseekV2RMSNorm handles it correctly)
-
         input_dtype = query_states.dtype
         if input_dtype == torch.float32:
             # Handle the case where the model is quantized
@@ -802,7 +784,7 @@ class MiniCPMSdpaAttention(MiniCPMAttention):
         if output_attentions:
             # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
             logger.warning_once(
-                "
+                "MiniCPM3Model is using MiniCPMSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
             )
             return super().forward(
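The updated warning text names the fallback precisely: requesting output_attentions=True forces SDPA back to the manual path. If attention weights are actually needed, the usual workaround is the one the warning suggests, requesting eager attention at load time. A hedged sketch, with a placeholder checkpoint path (not taken from this diff):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "path/to/minicpm3-checkpoint"  # placeholder

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,        # this modeling file ships with the checkpoint, not with transformers
    attn_implementation="eager",   # manual attention, so output_attentions=True works without the fallback warning
)

inputs = tokenizer("Hello", return_tensors="pt")
outputs = model(**inputs, output_attentions=True)
print(len(outputs.attentions))  # one attention-weight tensor per decoder layer
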
@@ -902,7 +884,7 @@ MINICPM_ATTENTION_CLASSES = {
 
 
 class MiniCPMDecoderLayer(nn.Module):
-    def __init__(self, config:
+    def __init__(self, config: MiniCPM3Config, layer_idx: int):
         super().__init__()
         self.hidden_size = config.hidden_size
         self.self_attn = MINICPM_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
@@ -986,7 +968,7 @@ MINICPM_START_DOCSTRING = r"""
     and behavior.
 
     Parameters:
-        config ([`
+        config ([`MiniCPM3Config`]):
             Model configuration class with all the parameters of the model. Initializing with a config file does not
             load the weights associated with the model, only the configuration. Check out the
             [`~PreTrainedModel.from_pretrained`] method to load the model weights.
@@ -998,7 +980,7 @@ MINICPM_START_DOCSTRING = r"""
     MINICPM_START_DOCSTRING,
 )
 class MiniCPM3PreTrainedModel(PreTrainedModel):
-    config_class =
+    config_class = MiniCPM3Config
     base_model_prefix = "model"
     supports_gradient_checkpointing = True
     _no_split_modules = ["MiniCPMDecoderLayer"]
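Setting config_class on the pretrained base class ties every model in this file to MiniCPM3Config: from_pretrained parses config.json into that class before building the module tree. A small hedged check (placeholder path, assuming the checkpoint's auto_map points at this configuration class):

from transformers import AutoConfig

config = AutoConfig.from_pretrained("path/to/minicpm3-checkpoint", trust_remote_code=True)  # placeholder path
print(type(config).__name__)  # expected: MiniCPM3Config, matching config_class above
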
@@ -1098,10 +1080,10 @@ class MiniCPM3Model(MiniCPM3PreTrainedModel):
     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MiniCPMDecoderLayer`]
 
     Args:
-        config:
+        config: MiniCPM3Config
     """
 
-    def __init__(self, config:
+    def __init__(self, config: MiniCPM3Config):
         super().__init__(config)
         self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
@@ -1239,9 +1221,7 @@ class MiniCPM3Model(MiniCPM3PreTrainedModel):
             all_self_attns += (layer_outputs[1],)
 
         hidden_states = self.norm(hidden_states)
-
-        # print("model outputs: ", hidden_states)
-
+
         # add hidden states from the last decoder layer
         if output_hidden_states:
             all_hidden_states += (hidden_states,)
@@ -1264,7 +1244,7 @@ class MiniCPM3ForCausalLM(MiniCPM3PreTrainedModel):
 
     def __init__(self, config):
         super().__init__(config)
-        self.model =
+        self.model = MiniCPM3Model(config)
         self.vocab_size = config.vocab_size
         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
 
@@ -1461,15 +1441,11 @@ class MiniCPM3ForCausalLM(MiniCPM3PreTrainedModel):
                       "temperature": temperature, "logits_processor": logits_processor, **kwargs}
 
         history.append({"role": role, "content": query})
-        history_str = tokenizer.apply_chat_template(history, tokenize=False, add_generation_prompt=
+        history_str = tokenizer.apply_chat_template(history, tokenize=False, add_generation_prompt=True)
         inputs = tokenizer(history_str, return_tensors='pt').to(self.device)
         outputs = self.generate(**inputs, **gen_kwargs)
         outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):-1]
         response = tokenizer.decode(outputs)
-        pattern = re.compile(r".*?(?=<AI>|<用户>)", re.DOTALL)
-        matches = pattern.findall(response)
-        if len(matches) > 0:
-            response = matches[0]
         history.append({"role": "assistant", "content": response})
         return response, history
 
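This hunk switches apply_chat_template to add_generation_prompt=True and drops the regex that used to trim the reply at <AI>/<用户> markers, relying on the chat template and generation stopping instead. A hedged usage sketch of the chat() helper: the checkpoint path is a placeholder, and the positional order (tokenizer, query, ...) is assumed from the body shown in the diff rather than from a full signature.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "path/to/minicpm3-checkpoint"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.bfloat16, trust_remote_code=True
).eval()

# chat() builds the prompt with tokenizer.apply_chat_template(..., add_generation_prompt=True)
# and returns the decoded reply plus the updated history.
history = []
response, history = model.chat(tokenizer, "Write a haiku about autumn.", history=history)
print(response)
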
@@ -1493,7 +1469,7 @@ class MiniCPM3ForSequenceClassification(MiniCPM3PreTrainedModel):
     def __init__(self, config):
         super().__init__(config)
         self.num_labels = config.num_labels
-        self.model =
+        self.model = MiniCPM3Model(config)
         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
 
         # Initialize weights and apply final processing