JinghuiLuAstronaut committed on
Commit 56f6578
1 Parent(s): a23a097

Upload BaichuanForCausalLM

config.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "_name_or_path": "/mnt/bn/lujinghui-nas-lq/models/baichuan2_7b_docllm/",
4
+ "architectures": [
5
+ "BaichuanForCausalLM"
6
+ ],
7
+ "auto_map": {
8
+ "AutoConfig": "configuration_baichuan.BaichuanConfig",
9
+ "AutoModelForCausalLM": "modeling_baichuan.BaichuanForCausalLM"
10
+ },
11
+ "bos_token_id": 1,
12
+ "eos_token_id": 2,
13
+ "hidden_act": "silu",
14
+ "hidden_size": 4096,
15
+ "initializer_range": 0.02,
16
+ "intermediate_size": 11008,
17
+ "lambda_ss": 1.0,
18
+ "lambda_st": 1.0,
19
+ "lambda_ts": 1.0,
20
+ "max_position_embeddings": 4096,
21
+ "model_max_length": 4096,
22
+ "model_type": "baichuan",
23
+ "num_attention_heads": 32,
24
+ "num_hidden_layers": 32,
25
+ "pad_token_id": 0,
26
+ "rms_norm_eps": 1e-06,
27
+ "tie_word_embeddings": false,
28
+ "torch_dtype": "float16",
29
+ "transformers_version": "4.36.2",
30
+ "use_cache": true,
31
+ "vocab_size": 125696
32
+ }
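A minimal loading sketch (not part of the commit): the auto_map above registers the custom classes, so the checkpoint is meant to be loaded with trust_remote_code=True. The repo id, the presence of a matching Baichuan tokenizer, and a CUDA device are assumptions here; input_coordinates mirrors the BaichuanModel.forward signature in modeling_baichuan.py below, and the exact bounding-box convention is assumed.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "JinghuiLuAstronaut/<this-repo>"  # hypothetical placeholder: actual repo id or a local path
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.float16, trust_remote_code=True).cuda()

enc = tokenizer("Total amount due: 42.00", return_tensors="pt").to(model.device)
# one 4-d bounding box per token; all-zero boxes are only a smoke-test placeholder
boxes = torch.zeros(1, enc.input_ids.shape[1], 4, dtype=model.dtype, device=model.device)
with torch.no_grad():
    out = model(input_ids=enc.input_ids, input_coordinates=boxes)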
configuration_baichuan.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2023 Baichuan Inc. All Rights Reserved.
2
+
3
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
6
+ # and OPT implementations in this library. It has been modified from its
7
+ # original forms to accommodate minor architectural differences compared
8
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+ from transformers.configuration_utils import PretrainedConfig
23
+ from transformers.utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ class BaichuanConfig(PretrainedConfig):
30
+ model_type = "baichuan"
31
+ keys_to_ignore_at_inference = ["past_key_values"]
32
+
33
+ def __init__(
34
+ self,
35
+ vocab_size=125696,
36
+ hidden_size=4096,
37
+ intermediate_size=11008,
38
+ num_hidden_layers=32,
39
+ num_attention_heads=32,
40
+ hidden_act="silu",
41
+ max_position_embeddings=4096,
42
+ initializer_range=0.02,
43
+ rms_norm_eps=1e-6,
44
+ use_cache=True,
45
+ pad_token_id=0,
46
+ bos_token_id=1,
47
+ eos_token_id=2,
48
+ tie_word_embeddings=False,
49
+ lambda_ts: float = 1.0,
50
+ lambda_st: float = 1.0,
51
+ lambda_ss: float = 1.0,
52
+ **kwargs,
53
+ ):
54
+ self.vocab_size = vocab_size
55
+ self.max_position_embeddings = max_position_embeddings
56
+ self.hidden_size = hidden_size
57
+ self.intermediate_size = intermediate_size
58
+ self.num_hidden_layers = num_hidden_layers
59
+ self.num_attention_heads = num_attention_heads
60
+ self.hidden_act = hidden_act
61
+ self.initializer_range = initializer_range
62
+ self.rms_norm_eps = rms_norm_eps
63
+ self.use_cache = use_cache
64
+ self.lambda_ts = lambda_ts
65
+ self.lambda_st = lambda_st
66
+ self.lambda_ss = lambda_ss
67
+ super().__init__(
68
+ pad_token_id=pad_token_id,
69
+ bos_token_id=bos_token_id,
70
+ eos_token_id=eos_token_id,
71
+ tie_word_embeddings=tie_word_embeddings,
72
+ **kwargs,
73
+ )
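For reference, a hedged sketch of building this configuration programmatically (values mirror config.json above; the flat import assumes configuration_baichuan.py sits next to the calling script):

from configuration_baichuan import BaichuanConfig

config = BaichuanConfig(
    vocab_size=125696,
    hidden_size=4096,
    intermediate_size=11008,
    num_hidden_layers=32,
    num_attention_heads=32,
    max_position_embeddings=4096,
    # DocLLM disentangled-attention weights for the text/spatial cross terms
    lambda_ts=1.0,
    lambda_st=1.0,
    lambda_ss=1.0,
)
print(config.model_type, config.lambda_ss)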
generation_config.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "pad_token_id": 0,
6
+ "transformers_version": "4.36.2"
7
+ }
generation_utils.py ADDED
@@ -0,0 +1,83 @@
1
+ from typing import List
2
+ from queue import Queue
3
+
4
+ import torch
5
+
6
+
7
+ def build_chat_input(model, tokenizer, messages: List[dict], max_new_tokens: int=0):
8
+ def _parse_messages(messages, split_role="user"):
9
+ system, rounds = "", []
10
+ round = []
11
+ for i, message in enumerate(messages):
12
+ if message["role"] == "system":
13
+ assert i == 0
14
+ system = message["content"]
15
+ continue
16
+ if message["role"] == split_role and round:
17
+ rounds.append(round)
18
+ round = []
19
+ round.append(message)
20
+ if round:
21
+ rounds.append(round)
22
+ return system, rounds
23
+
24
+ max_new_tokens = max_new_tokens or model.generation_config.max_new_tokens
25
+ max_input_tokens = model.config.model_max_length - max_new_tokens
26
+ system, rounds = _parse_messages(messages, split_role="user")
27
+ system_tokens = tokenizer.encode(system)
28
+ max_history_tokens = max_input_tokens - len(system_tokens)
29
+
30
+ history_tokens = []
31
+ for round in rounds[::-1]:
32
+ round_tokens = []
33
+ for message in round:
34
+ if message["role"] == "user":
35
+ round_tokens.append(model.generation_config.user_token_id)
36
+ else:
37
+ round_tokens.append(model.generation_config.assistant_token_id)
38
+ round_tokens.extend(tokenizer.encode(message["content"]))
39
+ if len(history_tokens) == 0 or len(history_tokens) + len(round_tokens) <= max_history_tokens:
40
+ history_tokens = round_tokens + history_tokens # concat left
41
+ if len(history_tokens) < max_history_tokens:
42
+ continue
43
+ break
44
+
45
+ input_tokens = system_tokens + history_tokens
46
+ if messages[-1]["role"] != "assistant":
47
+ input_tokens.append(model.generation_config.assistant_token_id)
48
+ input_tokens = input_tokens[-max_input_tokens:] # truncate left
49
+ return torch.LongTensor([input_tokens]).to(model.device)
50
+
51
+
52
+ class TextIterStreamer:
53
+ def __init__(self, tokenizer, skip_prompt=False, skip_special_tokens=False):
54
+ self.tokenizer = tokenizer
55
+ self.skip_prompt = skip_prompt
56
+ self.skip_special_tokens = skip_special_tokens
57
+ self.tokens = []
58
+ self.text_queue = Queue()
59
+ self.next_tokens_are_prompt = True
60
+
61
+ def put(self, value):
62
+ if self.skip_prompt and self.next_tokens_are_prompt:
63
+ self.next_tokens_are_prompt = False
64
+ else:
65
+ if len(value.shape) > 1:
66
+ value = value[0]
67
+ self.tokens.extend(value.tolist())
68
+ self.text_queue.put(
69
+ self.tokenizer.decode(self.tokens, skip_special_tokens=self.skip_special_tokens))
70
+
71
+ def end(self):
72
+ self.text_queue.put(None)
73
+
74
+ def __iter__(self):
75
+ return self
76
+
77
+ def __next__(self):
78
+ value = self.text_queue.get()
79
+ if value is None:
80
+ raise StopIteration()
81
+ else:
82
+ return value
83
+
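A hedged usage sketch for the two helpers above, following the usual Baichuan streaming-chat pattern: it assumes model and tokenizer are already loaded (see the loading sketch after config.json) and that model.generation_config defines user_token_id, assistant_token_id and max_new_tokens, which this commit's generation_config.json does not set. Whether text-only generation works without input_coordinates depends on code not shown here, so treat this as a pattern sketch rather than a guaranteed path for this checkpoint.

from threading import Thread

messages = [{"role": "user", "content": "Summarise this receipt."}]
input_ids = build_chat_input(model, tokenizer, messages, max_new_tokens=256)

streamer = TextIterStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
Thread(target=model.generate,
       kwargs=dict(inputs=input_ids, max_new_tokens=256, streamer=streamer)).start()
for text_so_far in streamer:
    print(text_so_far, end="\r", flush=True)  # each item is the full decoded reply so far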
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac3198f89b174603f9675ca1decf8d048d23b5cae4b58b19cc1079722b2826ce
3
+ size 4982987912
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9cc485600cd477a886884262d68e8093df34d8b910f03a73cd7e59736e781ae2
3
+ size 4964131832
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:66a4c399876dfc0e2f4c76f371766fb356f886d029b5256b589a6295b02f73bd
3
+ size 4964131848
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:708405dfea28b7a51ca221e7f3dd0e56505cd1c6627629bb5d7dfcff4e852b92
3
+ size 3321983416
model.safetensors.index.json ADDED
@@ -0,0 +1,267 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 18233204736
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00004-of-00004.safetensors",
7
+ "model.embed_spatial.weight": "model-00001-of-00004.safetensors",
8
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
9
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
10
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
11
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
12
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
13
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
14
+ "model.layers.0.self_attn.W_pack.weight": "model-00001-of-00004.safetensors",
15
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
16
+ "model.layers.0.self_attn.spatial_W_pack.weight": "model-00001-of-00004.safetensors",
17
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
18
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
19
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
20
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
21
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
22
+ "model.layers.1.self_attn.W_pack.weight": "model-00001-of-00004.safetensors",
23
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
24
+ "model.layers.1.self_attn.spatial_W_pack.weight": "model-00001-of-00004.safetensors",
25
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
26
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
27
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
28
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
29
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
30
+ "model.layers.10.self_attn.W_pack.weight": "model-00002-of-00004.safetensors",
31
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
32
+ "model.layers.10.self_attn.spatial_W_pack.weight": "model-00002-of-00004.safetensors",
33
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
34
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
35
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
36
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
37
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
38
+ "model.layers.11.self_attn.W_pack.weight": "model-00002-of-00004.safetensors",
39
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
40
+ "model.layers.11.self_attn.spatial_W_pack.weight": "model-00002-of-00004.safetensors",
41
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
42
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
43
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
44
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
45
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
46
+ "model.layers.12.self_attn.W_pack.weight": "model-00002-of-00004.safetensors",
47
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
48
+ "model.layers.12.self_attn.spatial_W_pack.weight": "model-00002-of-00004.safetensors",
49
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
50
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
51
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
52
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
53
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
54
+ "model.layers.13.self_attn.W_pack.weight": "model-00002-of-00004.safetensors",
55
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
56
+ "model.layers.13.self_attn.spatial_W_pack.weight": "model-00002-of-00004.safetensors",
57
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
58
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
59
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
60
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
61
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
62
+ "model.layers.14.self_attn.W_pack.weight": "model-00002-of-00004.safetensors",
63
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
64
+ "model.layers.14.self_attn.spatial_W_pack.weight": "model-00002-of-00004.safetensors",
65
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
66
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
67
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
68
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
69
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
70
+ "model.layers.15.self_attn.W_pack.weight": "model-00002-of-00004.safetensors",
71
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
72
+ "model.layers.15.self_attn.spatial_W_pack.weight": "model-00002-of-00004.safetensors",
73
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
74
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
75
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
76
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
77
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
78
+ "model.layers.16.self_attn.W_pack.weight": "model-00002-of-00004.safetensors",
79
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
80
+ "model.layers.16.self_attn.spatial_W_pack.weight": "model-00002-of-00004.safetensors",
81
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
82
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
83
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
84
+ "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
85
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
86
+ "model.layers.17.self_attn.W_pack.weight": "model-00002-of-00004.safetensors",
87
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
88
+ "model.layers.17.self_attn.spatial_W_pack.weight": "model-00002-of-00004.safetensors",
89
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
90
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
91
+ "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
92
+ "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
93
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
94
+ "model.layers.18.self_attn.W_pack.weight": "model-00003-of-00004.safetensors",
95
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
96
+ "model.layers.18.self_attn.spatial_W_pack.weight": "model-00003-of-00004.safetensors",
97
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
98
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
99
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
100
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
101
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
102
+ "model.layers.19.self_attn.W_pack.weight": "model-00003-of-00004.safetensors",
103
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
104
+ "model.layers.19.self_attn.spatial_W_pack.weight": "model-00003-of-00004.safetensors",
105
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
106
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
107
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
108
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
109
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
110
+ "model.layers.2.self_attn.W_pack.weight": "model-00001-of-00004.safetensors",
111
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
112
+ "model.layers.2.self_attn.spatial_W_pack.weight": "model-00001-of-00004.safetensors",
113
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
114
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
115
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
116
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
117
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
118
+ "model.layers.20.self_attn.W_pack.weight": "model-00003-of-00004.safetensors",
119
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
120
+ "model.layers.20.self_attn.spatial_W_pack.weight": "model-00003-of-00004.safetensors",
121
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
122
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
123
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
124
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
125
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
126
+ "model.layers.21.self_attn.W_pack.weight": "model-00003-of-00004.safetensors",
127
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
128
+ "model.layers.21.self_attn.spatial_W_pack.weight": "model-00003-of-00004.safetensors",
129
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
130
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
131
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
132
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
133
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
134
+ "model.layers.22.self_attn.W_pack.weight": "model-00003-of-00004.safetensors",
135
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
136
+ "model.layers.22.self_attn.spatial_W_pack.weight": "model-00003-of-00004.safetensors",
137
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
138
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
139
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
140
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
141
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
142
+ "model.layers.23.self_attn.W_pack.weight": "model-00003-of-00004.safetensors",
143
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
144
+ "model.layers.23.self_attn.spatial_W_pack.weight": "model-00003-of-00004.safetensors",
145
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
146
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
147
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
148
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
149
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
150
+ "model.layers.24.self_attn.W_pack.weight": "model-00003-of-00004.safetensors",
151
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
152
+ "model.layers.24.self_attn.spatial_W_pack.weight": "model-00003-of-00004.safetensors",
153
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
154
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
155
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
156
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
157
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
158
+ "model.layers.25.self_attn.W_pack.weight": "model-00003-of-00004.safetensors",
159
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
160
+ "model.layers.25.self_attn.spatial_W_pack.weight": "model-00003-of-00004.safetensors",
161
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
162
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
163
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
164
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
165
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
166
+ "model.layers.26.self_attn.W_pack.weight": "model-00003-of-00004.safetensors",
167
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
168
+ "model.layers.26.self_attn.spatial_W_pack.weight": "model-00003-of-00004.safetensors",
169
+ "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
170
+ "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
171
+ "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
172
+ "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
173
+ "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
174
+ "model.layers.27.self_attn.W_pack.weight": "model-00003-of-00004.safetensors",
175
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
176
+ "model.layers.27.self_attn.spatial_W_pack.weight": "model-00003-of-00004.safetensors",
177
+ "model.layers.28.input_layernorm.weight": "model-00004-of-00004.safetensors",
178
+ "model.layers.28.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
179
+ "model.layers.28.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
180
+ "model.layers.28.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
181
+ "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
182
+ "model.layers.28.self_attn.W_pack.weight": "model-00004-of-00004.safetensors",
183
+ "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
184
+ "model.layers.28.self_attn.spatial_W_pack.weight": "model-00004-of-00004.safetensors",
185
+ "model.layers.29.input_layernorm.weight": "model-00004-of-00004.safetensors",
186
+ "model.layers.29.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
187
+ "model.layers.29.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
188
+ "model.layers.29.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
189
+ "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
190
+ "model.layers.29.self_attn.W_pack.weight": "model-00004-of-00004.safetensors",
191
+ "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
192
+ "model.layers.29.self_attn.spatial_W_pack.weight": "model-00004-of-00004.safetensors",
193
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
194
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
195
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
196
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
197
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
198
+ "model.layers.3.self_attn.W_pack.weight": "model-00001-of-00004.safetensors",
199
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
200
+ "model.layers.3.self_attn.spatial_W_pack.weight": "model-00001-of-00004.safetensors",
201
+ "model.layers.30.input_layernorm.weight": "model-00004-of-00004.safetensors",
202
+ "model.layers.30.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
203
+ "model.layers.30.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
204
+ "model.layers.30.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
205
+ "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
206
+ "model.layers.30.self_attn.W_pack.weight": "model-00004-of-00004.safetensors",
207
+ "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
208
+ "model.layers.30.self_attn.spatial_W_pack.weight": "model-00004-of-00004.safetensors",
209
+ "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
210
+ "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
211
+ "model.layers.31.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
212
+ "model.layers.31.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
213
+ "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
214
+ "model.layers.31.self_attn.W_pack.weight": "model-00004-of-00004.safetensors",
215
+ "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
216
+ "model.layers.31.self_attn.spatial_W_pack.weight": "model-00004-of-00004.safetensors",
217
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
218
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
219
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
220
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
221
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
222
+ "model.layers.4.self_attn.W_pack.weight": "model-00001-of-00004.safetensors",
223
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
224
+ "model.layers.4.self_attn.spatial_W_pack.weight": "model-00001-of-00004.safetensors",
225
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
226
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
227
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
228
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
229
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
230
+ "model.layers.5.self_attn.W_pack.weight": "model-00001-of-00004.safetensors",
231
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
232
+ "model.layers.5.self_attn.spatial_W_pack.weight": "model-00001-of-00004.safetensors",
233
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
234
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
235
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
236
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
237
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
238
+ "model.layers.6.self_attn.W_pack.weight": "model-00001-of-00004.safetensors",
239
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
240
+ "model.layers.6.self_attn.spatial_W_pack.weight": "model-00001-of-00004.safetensors",
241
+ "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
242
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
243
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
244
+ "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
245
+ "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
246
+ "model.layers.7.self_attn.W_pack.weight": "model-00001-of-00004.safetensors",
247
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
248
+ "model.layers.7.self_attn.spatial_W_pack.weight": "model-00001-of-00004.safetensors",
249
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
250
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
251
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
252
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
253
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
254
+ "model.layers.8.self_attn.W_pack.weight": "model-00002-of-00004.safetensors",
255
+ "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
256
+ "model.layers.8.self_attn.spatial_W_pack.weight": "model-00002-of-00004.safetensors",
257
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
258
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
259
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
260
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
261
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
262
+ "model.layers.9.self_attn.W_pack.weight": "model-00002-of-00004.safetensors",
263
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
264
+ "model.layers.9.self_attn.spatial_W_pack.weight": "model-00002-of-00004.safetensors",
265
+ "model.norm.weight": "model-00004-of-00004.safetensors"
266
+ }
267
+ }
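The index above maps each parameter name to the shard that stores it; a hedged sketch of reading a single tensor without instantiating the model (assumes the shards have been downloaded to a local model_dir):

import json, os
from safetensors import safe_open

model_dir = "/path/to/local/checkpoint"  # hypothetical local download directory
with open(os.path.join(model_dir, "model.safetensors.index.json")) as f:
    index = json.load(f)

name = "model.embed_spatial.weight"        # the DocLLM-specific spatial projection
shard = index["weight_map"][name]          # e.g. "model-00001-of-00004.safetensors"
with safe_open(os.path.join(model_dir, shard), framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape))           # expected (4096, 4) for nn.Linear(4, 4096)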
modeling_baichuan.py ADDED
@@ -0,0 +1,879 @@
1
+ # Copyright 2023 Baichuan Inc. All Rights Reserved.
2
+
3
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
6
+ # and OPT implementations in this library. It has been modified from its
7
+ # original forms to accommodate minor architectural differences compared
8
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+
23
+ from .configuration_baichuan import BaichuanConfig
24
+ from .generation_utils import build_chat_input, TextIterStreamer
25
+
26
+ import math
27
+ from typing import List, Optional, Tuple, Union
28
+ from threading import Thread
29
+
30
+ import torch
31
+ import torch.utils.checkpoint
32
+ from torch import nn
33
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
34
+ from torch.nn import functional as F
35
+ from transformers import PreTrainedModel, PretrainedConfig
36
+ from transformers.activations import ACT2FN
37
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
38
+ from transformers.generation.utils import GenerationConfig
39
+ from transformers.utils import logging, ContextManagers
40
+
41
+ import os
42
+ from contextlib import contextmanager
43
+ logger = logging.get_logger(__name__)
44
+
45
+ try:
46
+ from xformers import ops as xops
47
+ except ImportError:
48
+ xops = None
49
+ logger.warning(
50
+        "Xformers is not installed correctly. If you want to use memory_efficient_attention to accelerate training, install it with `pip install xformers`."
51
+ )
52
+
53
+
54
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
55
+ def _make_causal_mask(
56
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
57
+ ):
58
+ """
59
+    Make causal mask used for autoregressive (uni-directional) self-attention.
60
+ """
61
+ bsz, tgt_len = input_ids_shape
62
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
63
+ mask_cond = torch.arange(mask.size(-1), device=device)
64
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
65
+ mask = mask.to(dtype)
66
+
67
+ if past_key_values_length > 0:
68
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
69
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
70
+
71
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
72
+ """
73
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
74
+ """
75
+ if len(mask.size()) == 3:
76
+ bsz, src_len, _ = mask.size()
77
+ tgt_len = tgt_len if tgt_len is not None else src_len
78
+ expanded_mask = mask[:,None,:,:].expand(bsz, 1, tgt_len, src_len).to(dtype)
79
+ else:
80
+ bsz, src_len = mask.size()
81
+ tgt_len = tgt_len if tgt_len is not None else src_len
82
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
83
+
84
+ inverted_mask = 1.0 - expanded_mask
85
+
86
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
87
+
88
+
89
+ class RMSNorm(nn.Module):
90
+ def __init__(self, hidden_size, eps=1e-6):
91
+ """
92
+ RMSNorm is equivalent to T5LayerNorm
93
+ """
94
+ super().__init__()
95
+ self.weight = nn.Parameter(torch.ones(hidden_size))
96
+ self.variance_epsilon = eps
97
+
98
+ def forward(self, hidden_states):
99
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
100
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
101
+
102
+ # convert into half-precision if necessary
103
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
104
+ hidden_states = hidden_states.to(self.weight.dtype)
105
+
106
+ return self.weight * hidden_states
107
+
108
+
109
+ class RotaryEmbedding(torch.nn.Module):
110
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
111
+ super().__init__()
112
+ self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
113
+ self.max_seq_len_cached = max_position_embeddings
114
+ t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32)
115
+ freqs = torch.outer(t, self.inv_freq)
116
+ emb = torch.cat((freqs, freqs), dim=-1)
117
+ self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32)
118
+ self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32)
119
+ def forward(self, x, seq_len=None):
120
+ # x: [bs, num_attention_heads, seq_len, head_size]
121
+ # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
122
+ if seq_len > self.max_seq_len_cached:
123
+ self.max_seq_len_cached = seq_len
124
+ t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32)
125
+ freqs = torch.outer(t, self.inv_freq)
126
+ emb = torch.cat((freqs, freqs), dim=-1)
127
+ self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32).to(x.device)
128
+ self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32).to(x.device)
129
+ elif self.cos_cached.device != x.device:
130
+ self.cos_cached = self.cos_cached.to(x.device)
131
+ self.sin_cached = self.sin_cached.to(x.device)
132
+ return (
133
+ self.cos_cached[:, :, :seq_len, ...],
134
+ self.sin_cached[:, :, :seq_len, ...],
135
+ )
136
+
137
+
138
+ def rotate_half(x):
139
+ """Rotates half the hidden dims of the input."""
140
+ x1 = x[..., : x.shape[-1] // 2]
141
+ x2 = x[..., x.shape[-1] // 2:]
142
+ return torch.cat((-x2, x1), dim=-1)
143
+
144
+
145
+ def apply_rotary_pos_emb(q, k, cos_, sin_, position_ids):
146
+ cos = cos_.squeeze(1).squeeze(0) # [seq_len, dim]
147
+ sin = sin_.squeeze(1).squeeze(0) # [seq_len, dim]
148
+ cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
149
+ sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
150
+ q_embed = (q.float() * cos) + (rotate_half(q.float()) * sin)
151
+ k_embed = (k.float() * cos) + (rotate_half(k.float()) * sin)
152
+ return q_embed.to(q.dtype), k_embed.to(k.dtype)
153
+
154
+
155
+ class MLP(nn.Module):
156
+ def __init__(
157
+ self,
158
+ hidden_size: int,
159
+ intermediate_size: int,
160
+ hidden_act: str,
161
+ ):
162
+ super().__init__()
163
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
164
+ self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
165
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
166
+ self.act_fn = ACT2FN[hidden_act]
167
+
168
+ def forward(self, x):
169
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
170
+
171
+
172
+ class DocLLMAttention(nn.Module):
173
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
174
+ def __init__(self, config: BaichuanConfig):
175
+ super().__init__()
176
+ self.config = config
177
+ self.hidden_size = config.hidden_size
178
+ self.num_heads = config.num_attention_heads
179
+ self.head_dim = self.hidden_size // self.num_heads
180
+ self.max_position_embeddings = config.max_position_embeddings
181
+ self._lambda_ts = config.lambda_ts
182
+ self._lambda_st = config.lambda_st
183
+ self._lambda_ss = config.lambda_ss
184
+
185
+ if (self.head_dim * self.num_heads) != self.hidden_size:
186
+ raise ValueError(
187
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
188
+ f" and `num_heads`: {self.num_heads})."
189
+ )
190
+ self.W_pack = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False)
191
+ self.spatial_W_pack = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False)
192
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
193
+ self.rotary_emb = RotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
194
+
195
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
196
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
197
+
198
+ def forward(
199
+ self,
200
+ hidden_states: torch.Tensor,
201
+ bounding_box_embeddings: torch.Tensor,
202
+ attention_mask: Optional[torch.Tensor] = None,
203
+ position_ids: Optional[torch.LongTensor] = None,
204
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
205
+ spatial_past_key_value: Optional[Tuple[torch.Tensor]] = None,
206
+ output_attentions: bool = False,
207
+ use_cache: bool = False,
208
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
209
+ bsz, q_len, _ = hidden_states.size()
210
+
211
+
212
+
213
+ proj = self.W_pack(hidden_states)
214
+ proj = proj.unflatten(-1, (3, self.hidden_size)).unsqueeze(0).transpose(0, -2).squeeze(-2)
215
+ query_states = proj[0].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
216
+ key_states = proj[1].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
217
+ value_states = proj[2].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
218
+
219
+ spatial_proj = self.spatial_W_pack(bounding_box_embeddings)
220
+ spatial_proj = spatial_proj.unflatten(-1, (3, self.hidden_size)).unsqueeze(0).transpose(0, -2).squeeze(-2)
221
+ spatial_query_states = spatial_proj[0].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
222
+ spatial_key_states = spatial_proj[1].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
223
+ spatial_value_states = spatial_proj[2].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
224
+
225
+ kv_seq_len = key_states.shape[-2]
226
+ if past_key_value is not None:
227
+ kv_seq_len += past_key_value[0].shape[-2]
228
+
229
+ spatial_kv_seq_len = spatial_key_states.shape[-2]
230
+ if spatial_past_key_value is not None:
231
+ spatial_kv_seq_len += spatial_past_key_value[0].shape[-2]
232
+
233
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
234
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
235
+
236
+        # FIXME: do we need to apply rotary position embeddings to the spatial modality?
237
+ spatial_query_states, spatial_key_states = apply_rotary_pos_emb(spatial_query_states, spatial_key_states, cos, sin, position_ids)
238
+ # [bsz, nh, t, hd]
239
+
240
+ if past_key_value is not None:
241
+ # reuse k, v, self_attention
242
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
243
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
244
+
245
+ if spatial_past_key_value is not None:
246
+ # reuse k, v, self_attention
247
+ spatial_key_states = torch.cat([spatial_past_key_value[0], spatial_key_states], dim=2)
248
+ spatial_value_states = torch.cat([spatial_past_key_value[1], spatial_value_states], dim=2)
249
+
250
+ past_key_value = (key_states, value_states) if use_cache else None
251
+ spatial_past_key_value = (spatial_key_states, spatial_value_states) if use_cache else None
252
+
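+        # NOTE: the xformers fast path below attends over the text projections only; the
+        # DocLLM spatial score terms are combined in the fallback branch further down.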
253
+ if xops is not None and self.training:
254
+ attn_weights = None
255
+ query_states = query_states.transpose(1, 2)
256
+ key_states = key_states.transpose(1, 2)
257
+ value_states = value_states.transpose(1, 2)
258
+ attn_output = xops.memory_efficient_attention(
259
+ query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask()
260
+ )
261
+ else:
262
+ with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True):
263
+
264
+
265
+ # attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask = attention_mask)
266
+ L, S = query_states.size(-2), key_states.size(-2)
267
+ scale_factor = 1 / math.sqrt(query_states.size(-1))
268
+ # attn_bias = torch.zeros(L, S, dtype=query_states.dtype)
269
+ attn_bias = torch.zeros_like(attention_mask)
270
+
271
+ if attention_mask is not None:
272
+ if attention_mask.dtype == torch.bool:
273
+ attn_bias.masked_fill_(attention_mask.logical_not(), float("-inf"))
274
+ else:
275
+ attn_bias += attention_mask
276
+
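+                # DocLLM-style disentangled attention: text/text, text/spatial, spatial/text and
+                # spatial/spatial scores are computed separately and mixed via the lambda_* config weights.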
277
+ attn_weight_tt = query_states @ key_states.transpose(-2, -1) * scale_factor
278
+ attn_weight_ts = query_states @ spatial_key_states.transpose(-2, -1) * scale_factor
279
+ attn_weight_st = spatial_query_states @ key_states.transpose(-2, -1) * scale_factor
280
+ attn_weight_ss = spatial_query_states @ spatial_key_states.transpose(-2, -1) * scale_factor
281
+
282
+ attn_weight = attn_weight_tt + self._lambda_ts * attn_weight_ts + self._lambda_st * attn_weight_st + self._lambda_ss * attn_weight_ss
283
+ attn_weight += attn_bias
284
+ attn_weight = torch.softmax(attn_weight, dim=-1)
285
+ attn_weight = torch.dropout(attn_weight, 0.0, train=True)
286
+ attn_output = attn_weight @ value_states
287
+
288
+ attn_output = attn_output.transpose(1, 2)
289
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
290
+ attn_output = self.o_proj(attn_output)
291
+
292
+ if not output_attentions:
293
+ attn_weights = None
294
+
295
+ return attn_output, attn_weights, past_key_value, spatial_past_key_value
296
+
297
+
298
+ class DecoderLayer(nn.Module):
299
+ def __init__(self, config: BaichuanConfig):
300
+ super().__init__()
301
+ self.hidden_size = config.hidden_size
302
+ self.self_attn = DocLLMAttention(config=config)
303
+ self.mlp = MLP(
304
+ hidden_size=self.hidden_size,
305
+ intermediate_size=config.intermediate_size,
306
+ hidden_act=config.hidden_act,
307
+ )
308
+ self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
309
+ self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
310
+
311
+ def forward(
312
+ self,
313
+ hidden_states: torch.Tensor,
314
+ bounding_box_embeddings: torch.Tensor,
315
+ attention_mask: Optional[torch.Tensor] = None,
316
+ position_ids: Optional[torch.LongTensor] = None,
317
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
318
+ spatial_past_key_value: Optional[Tuple[torch.Tensor]] = None,
319
+ output_attentions: Optional[bool] = False,
320
+ use_cache: Optional[bool] = False,
321
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
322
+
323
+ residual = hidden_states
324
+
325
+ hidden_states = self.input_layernorm(hidden_states)
326
+
327
+ # Self Attention
328
+ hidden_states, self_attn_weights, present_key_value, spatial_present_key_value = self.self_attn(
329
+ hidden_states=hidden_states,
330
+ bounding_box_embeddings=bounding_box_embeddings,
331
+ attention_mask=attention_mask,
332
+ position_ids=position_ids,
333
+ past_key_value=past_key_value,
334
+ spatial_past_key_value=spatial_past_key_value,
335
+ output_attentions=output_attentions,
336
+ use_cache=use_cache,
337
+ )
338
+ hidden_states = residual + hidden_states
339
+
340
+ # Fully Connected
341
+ residual = hidden_states
342
+ hidden_states = self.post_attention_layernorm(hidden_states)
343
+ hidden_states = self.mlp(hidden_states)
344
+ hidden_states = residual + hidden_states
345
+
346
+ outputs = (hidden_states,)
347
+
348
+ if output_attentions:
349
+ outputs += (self_attn_weights,)
350
+
351
+ if use_cache:
352
+ outputs += (present_key_value,)
353
+ outputs += (spatial_present_key_value,)
354
+
355
+ return outputs
356
+
357
+
358
+ class BaichuanPreTrainedModel(PreTrainedModel):
359
+ config_class = BaichuanConfig
360
+ base_model_prefix = "model"
361
+ supports_gradient_checkpointing = True
362
+ _no_split_modules = ["DecoderLayer"]
363
+ _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
364
+
365
+ def _init_weights(self, module):
366
+ std = self.config.initializer_range
367
+ if isinstance(module, nn.Linear):
368
+ module.weight.data.normal_(mean=0.0, std=std)
369
+ if module.bias is not None:
370
+ module.bias.data.zero_()
371
+ elif isinstance(module, nn.Embedding):
372
+ module.weight.data.normal_(mean=0.0, std=std)
373
+ if module.padding_idx is not None:
374
+ module.weight.data[module.padding_idx].zero_()
375
+
376
+ def _set_gradient_checkpointing(self, module, value=False):
377
+ if isinstance(module, BaichuanModel):
378
+ module.gradient_checkpointing = value
379
+
380
+ class DocLLMBaseModelOutputWithPast(BaseModelOutputWithPast):
381
+ def __init__(self, *args, spatial_past_key_values=None, **kwargs):
382
+ super().__init__(*args, **kwargs)
383
+ self.spatial_past_key_values = spatial_past_key_values
384
+
385
+ class DocLLMCausalLMOutputWithPast(CausalLMOutputWithPast):
386
+ def __init__(self, *args, spatial_past_key_values=None, **kwargs):
387
+ super().__init__(*args, **kwargs)
388
+ self.spatial_past_key_values = spatial_past_key_values
389
+
390
+
391
+ class BaichuanModel(BaichuanPreTrainedModel):
392
+ def __init__(self, config: BaichuanConfig):
393
+ super().__init__(config)
394
+ self.padding_idx = config.pad_token_id
395
+ self.vocab_size = config.vocab_size
396
+
397
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
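+        # projects each token's 4-d bounding box (the DocLLM spatial modality) into the hidden size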
398
+ self.embed_spatial = nn.Linear(4, config.hidden_size, bias=False)
399
+ self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.num_hidden_layers)])
400
+ self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
401
+
402
+ self.gradient_checkpointing = False
403
+ # Initialize weights and apply final processing
404
+ self.post_init()
405
+
406
+ def get_input_embeddings(self):
407
+ return self.embed_tokens
408
+
409
+ def set_input_embeddings(self, value):
410
+ self.embed_tokens = value
411
+
412
+ # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
413
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
414
+ # create causal mask
415
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
416
+ combined_attention_mask = None
417
+ if input_shape[-1] > 1:
418
+ combined_attention_mask = _make_causal_mask(
419
+ input_shape,
420
+ inputs_embeds.dtype,
421
+ device=inputs_embeds.device,
422
+ past_key_values_length=past_key_values_length,
423
+ )
424
+
425
+ if attention_mask is not None:
426
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
427
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
428
+ inputs_embeds.device
429
+ )
430
+ combined_attention_mask = (
431
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
432
+ )
433
+
434
+ return combined_attention_mask
435
+
436
+ def forward(
437
+ self,
438
+ input_ids: torch.LongTensor = None,
439
+ input_coordinates: torch.FloatTensor = None,
440
+ attention_mask: Optional[torch.Tensor] = None,
441
+ position_ids: Optional[torch.LongTensor] = None,
442
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
443
+ spatial_past_key_values: Optional[List[torch.FloatTensor]] = None,
444
+ inputs_embeds: Optional[torch.FloatTensor] = None,
445
+ use_cache: Optional[bool] = None,
446
+ output_attentions: Optional[bool] = None,
447
+ output_hidden_states: Optional[bool] = None,
448
+ return_dict: Optional[bool] = None,
449
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
450
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
451
+ output_hidden_states = (
452
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
453
+ )
454
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
455
+
456
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
457
+
458
+ # retrieve input_ids and inputs_embeds
459
+ if input_ids is not None and inputs_embeds is not None:
460
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
461
+ elif input_ids is not None:
462
+ batch_size, seq_length = input_ids.shape
463
+ elif inputs_embeds is not None:
464
+ batch_size, seq_length, _ = inputs_embeds.shape
465
+ else:
466
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
467
+ assert input_ids.device == input_coordinates.device
468
+
469
+ seq_length_with_past = seq_length
470
+ past_key_values_length = 0
471
+
472
+ spatial_seq_length_with_past = seq_length
473
+ spatial_past_key_values_length = 0
474
+
475
+ if past_key_values is not None:
476
+ past_key_values_length = past_key_values[0][0].shape[2]
477
+ seq_length_with_past = seq_length_with_past + past_key_values_length
478
+
479
+ if spatial_past_key_values is not None:
480
+ spatial_past_key_values_length = spatial_past_key_values[0][0].shape[2]
481
+ spatial_seq_length_with_past = spatial_seq_length_with_past + spatial_past_key_values_length
482
+
483
+ if position_ids is None:
484
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
485
+ position_ids = torch.arange(
486
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
487
+ )
488
+ position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
489
+ else:
490
+ position_ids = position_ids.view(-1, seq_length).long()
491
+
492
+ if inputs_embeds is None:
493
+ inputs_embeds = self.embed_tokens(input_ids)
494
+
495
+ coordinate_embeds = self.embed_spatial(input_coordinates)
496
+
497
+
498
+ # embed positions
499
+ if attention_mask is None:
500
+ attention_mask = torch.ones(
501
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
502
+ )
503
+ attention_mask = self._prepare_decoder_attention_mask(
504
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
505
+ )
506
+
507
+ hidden_states = inputs_embeds
508
+
509
+ if self.gradient_checkpointing and self.training:
510
+ if use_cache:
511
+ logger.warning_once(
512
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
513
+ )
514
+ use_cache = False
515
+
516
+ # decoder layers
517
+ all_hidden_states = () if output_hidden_states else None
518
+ all_self_attns = () if output_attentions else None
519
+ next_decoder_cache = () if use_cache else None
520
+ spatial_next_decoder_cache = () if use_cache else None
521
+
522
+ for idx, decoder_layer in enumerate(self.layers):
523
+ if output_hidden_states:
524
+ all_hidden_states += (hidden_states,)
525
+
526
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
527
+ spatial_past_key_value = spatial_past_key_values[idx] if spatial_past_key_values is not None else None
528
+
529
+ if self.gradient_checkpointing and self.training:
530
+
531
+ def create_custom_forward(module):
532
+ def custom_forward(*inputs):
533
+ # None for past_key_value
534
+ return module(*inputs, output_attentions, None)
535
+
536
+ return custom_forward
537
+
538
+ layer_outputs = torch.utils.checkpoint.checkpoint(
539
+ create_custom_forward(decoder_layer),
540
+ hidden_states,
541
+ coordinate_embeds,
542
+ attention_mask,
543
+ position_ids,
544
+ None,
545
+ )
546
+ else:
547
+ layer_outputs = decoder_layer(
548
+ hidden_states,
549
+ coordinate_embeds,
550
+ attention_mask=attention_mask,
551
+ position_ids=position_ids,
552
+ past_key_value=past_key_value,
553
+ spatial_past_key_value=spatial_past_key_value,
554
+ output_attentions=output_attentions,
555
+ use_cache=use_cache,
556
+ )
557
+
558
+ hidden_states = layer_outputs[0]
559
+
560
+ if use_cache:
561
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
562
+                spatial_next_decoder_cache += (layer_outputs[3 if output_attentions else 2],)  # spatial cache comes after the text cache in the layer outputs
563
+
564
+ if output_attentions:
565
+ all_self_attns += (layer_outputs[1],)
566
+
567
+ hidden_states = self.norm(hidden_states)
568
+
569
+ # add hidden states from the last decoder layer
570
+ if output_hidden_states:
571
+ all_hidden_states += (hidden_states,)
572
+
573
+ next_cache = next_decoder_cache if use_cache else None
574
+ spatial_next_cache = spatial_next_decoder_cache if use_cache else None
575
+
576
+ if not return_dict:
577
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
578
+ return DocLLMBaseModelOutputWithPast(
579
+ last_hidden_state=hidden_states,
580
+ past_key_values=next_cache,
581
+ spatial_past_key_values=spatial_next_cache,
582
+ hidden_states=all_hidden_states,
583
+ attentions=all_self_attns,
584
+ )
585
+
586
+
587
+ class NormHead(nn.Module):
588
+ def __init__(self, hidden_size, vocab_size, bias=False):
589
+ super().__init__()
590
+ self.weight = nn.Parameter(torch.empty((vocab_size, hidden_size)))
591
+ nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
592
+ self.first_flag = True
593
+
594
+ def forward(self, hidden_states):
595
+ if self.training:
596
+ norm_weight = nn.functional.normalize(self.weight)
597
+ elif self.first_flag:
598
+ self.first_flag = False
599
+ self.weight = nn.Parameter(nn.functional.normalize(self.weight))
600
+ norm_weight = self.weight
601
+ else:
602
+ norm_weight = self.weight
603
+ return nn.functional.linear(hidden_states, norm_weight)
604
+
605
+ _init_weights = True
606
+ @contextmanager
607
+ def no_init_weights(_enable=True):
608
+ global _init_weights
609
+ old_init_weights = _init_weights
610
+ if _enable:
611
+ _init_weights = False
612
+ try:
613
+ yield
614
+ finally:
615
+ _init_weights = old_init_weights
616
+
617
+ class BaichuanForCausalLM(BaichuanPreTrainedModel):
618
+ def __init__(self, config, *model_args, **model_kwargs):
619
+ super().__init__(config, *model_args, **model_kwargs)
620
+ self.model = BaichuanModel(config)
621
+
622
+ self.lm_head = NormHead(config.hidden_size, config.vocab_size, bias=False)
623
+ if hasattr(config, "quantization_config") and config.quantization_config['load_in_4bit']:
624
+ try:
625
+ from .quantizer import quantize_offline, init_model_weight_int4
626
+ except ImportError:
627
+ raise ImportError("The quantizer module (QLinear) is required to run 4-bit quantization.")
628
+ quantize_offline(self, 4)
629
+ # Initialize weights and apply final processing
630
+ self.post_init()
631
+
632
+ def get_input_embeddings(self):
633
+ return self.model.embed_tokens
634
+
635
+ def set_input_embeddings(self, value):
636
+ self.model.embed_tokens = value
637
+
638
+ def get_output_embeddings(self):
639
+ return self.lm_head
640
+
641
+ def set_output_embeddings(self, new_embeddings):
642
+ self.lm_head = new_embeddings
643
+
644
+ def set_decoder(self, decoder):
645
+ self.model = decoder
646
+
647
+ def get_decoder(self):
648
+ return self.model
649
+
650
+ @classmethod
651
+ def from_pretrained(
652
+ cls,
653
+ pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
654
+ *model_args,
655
+ config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
656
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
657
+ ignore_mismatched_sizes: bool = False,
658
+ force_download: bool = False,
659
+ local_files_only: bool = False,
660
+ token: Optional[Union[str, bool]] = None,
661
+ revision: str = "main",
662
+ use_safetensors: bool = None,
663
+ **kwargs,
664
+ ):
665
+ # Load config if we don't provide a configuration
666
+ if not isinstance(config, PretrainedConfig):
667
+ config_path = config if config is not None else pretrained_model_name_or_path
668
+ config, model_kwargs = cls.config_class.from_pretrained(
669
+ config_path,
670
+ cache_dir=cache_dir,
671
+ return_unused_kwargs=True,
672
+ force_download=force_download,
673
+ resume_download=False,
674
+ proxies=None,
675
+ local_files_only=local_files_only,
676
+ token=token,
677
+ revision=revision,
678
+ subfolder="",
679
+ _from_auto=False,
680
+ _from_pipeline=None,
681
+ **kwargs,
682
+ )
683
+ else:
684
+ model_kwargs = kwargs
685
+
686
+ if hasattr(config, "quantization_config") and config.quantization_config['load_in_4bit']:
687
+ try:
688
+ from .quantizer import init_model_weight_int4
689
+ from accelerate import init_empty_weights, dispatch_model, infer_auto_device_map
690
+ from accelerate.utils import CustomDtype
691
+ from accelerate.utils import get_balanced_memory
692
+ except ImportError:
693
+ raise ImportError("init_model_weight_int4 and accelerate are required to load a 4-bit quantized checkpoint.")
694
+ # Instantiate model.
695
+ init_contexts = [no_init_weights(_enable=True)]
696
+ init_contexts.append(init_empty_weights())
697
+ with ContextManagers(init_contexts):
698
+ model = cls(config)
699
+
700
+ model_file = os.path.join(pretrained_model_name_or_path, 'pytorch_model.bin')
701
+ state_dict = torch.load(model_file, map_location="cpu")
702
+ model.is_quantized = True
703
+
704
+ device_map = kwargs.pop("device_map", None)
705
+ torch_dtype = kwargs.pop("torch_dtype", None)
706
+
707
+ kwargs = {"no_split_module_classes": model._no_split_modules}
708
+ target_dtype = CustomDtype.INT4
709
+ max_memory = get_balanced_memory(
710
+ model,
711
+ dtype=target_dtype,
712
+ low_zero=(device_map == "balanced_low_0"),
713
+ max_memory=None,
714
+ **kwargs,
715
+ )
716
+ kwargs["max_memory"] = max_memory
717
+
718
+ device_map = infer_auto_device_map(model, dtype=target_dtype, **kwargs)
719
+ model = init_model_weight_int4(config, model, state_dict)
720
+
721
+ # Set model in evaluation mode to deactivate DropOut modules by default
722
+ model.eval()
723
+ # If it is a model with generation capabilities, attempt to load the generation config
724
+ if model.can_generate():
725
+ try:
726
+ model.generation_config = GenerationConfig.from_pretrained(
727
+ pretrained_model_name_or_path,
728
+ cache_dir=cache_dir,
729
+ force_download=force_download,
730
+ resume_download=False,
731
+ proxies=None,
732
+ local_files_only=local_files_only,
733
+ token=token,
734
+ revision=revision,
735
+ subfolder="",
736
+ _from_auto=False,
737
+ _from_pipeline=None,
738
+ **kwargs,
739
+ )
740
+ except (OSError, TypeError):
741
+ logger.info(
742
+ "Generation config file not found, using a generation config created from the model config."
743
+ )
744
+ pass
745
+
746
+ if device_map is not None:
747
+ dispatch_model(model, device_map=device_map)
748
+
749
+ return model
750
+ return super(BaichuanForCausalLM, cls).from_pretrained(pretrained_model_name_or_path, *model_args,
751
+ config=config, cache_dir=cache_dir, ignore_mismatched_sizes=ignore_mismatched_sizes,
752
+ force_download=force_download, local_files_only=local_files_only, token=token, revision=revision,
753
+ use_safetensors=use_safetensors, **kwargs)
754
+
755
+ def forward(
756
+ self,
757
+ input_ids: torch.LongTensor = None,
758
+ input_coordinates: torch.FloatTensor = None,
759
+ attention_mask: Optional[torch.Tensor] = None,
760
+ position_ids: Optional[torch.LongTensor] = None,
761
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
762
+ spatial_past_key_values: Optional[List[torch.FloatTensor]] = None,
763
+ inputs_embeds: Optional[torch.FloatTensor] = None,
764
+ labels: Optional[torch.LongTensor] = None,
765
+ use_cache: Optional[bool] = None,
766
+ output_attentions: Optional[bool] = None,
767
+ output_hidden_states: Optional[bool] = None,
768
+ return_dict: Optional[bool] = None,
769
+ ) -> Union[Tuple, DocLLMCausalLMOutputWithPast]:
770
+
771
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
772
+ output_hidden_states = (
773
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
774
+ )
775
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
776
+
777
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
778
+ outputs = self.model(
779
+ input_ids=input_ids,
780
+ input_coordinates=input_coordinates,
781
+ attention_mask=attention_mask,
782
+ position_ids=position_ids,
783
+ past_key_values=past_key_values,
784
+ spatial_past_key_values=spatial_past_key_values,
785
+ inputs_embeds=inputs_embeds,
786
+ use_cache=use_cache,
787
+ output_attentions=output_attentions,
788
+ output_hidden_states=output_hidden_states,
789
+ return_dict=return_dict,
790
+ )
791
+
792
+ hidden_states = outputs[0]
793
+ logits = self.lm_head(hidden_states)
794
+ loss = None
795
+ if labels is not None:
796
+ # Shift so that tokens < n predict n
797
+ shift_logits = logits[..., :-1, :].contiguous()
798
+ shift_labels = labels[..., 1:].contiguous()
799
+ # Flatten the tokens
800
+ loss_fct = CrossEntropyLoss()
801
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
802
+ shift_labels = shift_labels.view(-1)
803
+ # Enable model parallelism
804
+ shift_labels = shift_labels.to(shift_logits.device)
805
+ loss = loss_fct(shift_logits, shift_labels)
806
+
807
+ if not return_dict:
808
+ output = (logits,) + outputs[1:]
809
+ return (loss,) + output if loss is not None else output
810
+
811
+ return DocLLMCausalLMOutputWithPast(
812
+ loss=loss,
813
+ logits=logits,
814
+ past_key_values=outputs.past_key_values,
815
+ spatial_past_key_values=outputs.spatial_past_key_values,
816
+ hidden_states=outputs.hidden_states,
817
+ attentions=outputs.attentions,
818
+ )
819
+
820
+ def prepare_inputs_for_generation(
821
+ self, input_ids, past_key_values=None, spatial_past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
822
+ ):
823
+ if past_key_values:
824
+ input_ids = input_ids[:, -1:]
825
+
826
+ position_ids = kwargs.get("position_ids", None)
827
+ if attention_mask is not None and position_ids is None:
828
+ # create position_ids on the fly for batch generation
829
+ position_ids = attention_mask.long().cumsum(-1) - 1
830
+ position_ids.masked_fill_(attention_mask == 0, 1)
831
+ if past_key_values:
832
+ position_ids = position_ids[:, -1].unsqueeze(-1)
833
+
834
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
835
+ if inputs_embeds is not None and past_key_values is None:
836
+ model_inputs = {"inputs_embeds": inputs_embeds}
837
+ else:
838
+ model_inputs = {"input_ids": input_ids}
839
+
840
+ model_inputs.update(
841
+ {
842
+ "position_ids": position_ids,
843
+ "past_key_values": past_key_values,
844
+ "spatial_past_key_values":spatial_past_key_values,
845
+ "use_cache": kwargs.get("use_cache"),
846
+ "attention_mask": attention_mask,
847
+ }
848
+ )
849
+ return model_inputs
850
+
851
+ @staticmethod
852
+ def _reorder_cache(past_key_values, beam_idx):
853
+ reordered_past = ()
854
+ for layer_past in past_key_values:
855
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
856
+ return reordered_past
857
+
858
+ def quantize(self, bits: int):
859
+ try:
860
+ from .quantizer import quantize_online
861
+ except ImportError:
862
+ raise ImportError("The quantizer module (QLinear) is required to run quantization.")
863
+ return quantize_online(self, bits)
864
+
865
+ def chat(self, tokenizer, messages: List[dict], stream=False,
866
+ generation_config: Optional[GenerationConfig]=None):
867
+ generation_config = generation_config or self.generation_config
868
+ input_ids = build_chat_input(self, tokenizer, messages, generation_config.max_new_tokens)
869
+ if stream:
870
+ streamer = TextIterStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
871
+ Thread(target=self.generate, kwargs=dict(
872
+ inputs=input_ids, streamer=streamer,
873
+ generation_config=generation_config,
874
+ )).start()
875
+ return streamer
876
+ else:
877
+ outputs = self.generate(input_ids, generation_config=generation_config)
878
+ response = tokenizer.decode(outputs[0][len(input_ids[0]):], skip_special_tokens=True)
879
+ return response
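A minimal usage sketch, not part of the commit, showing how a checkpoint with these files is typically loaded through the auto_map entries in config.json with trust_remote_code=True. The local path, the coordinate layout (one box of four normalized values per token), and the dummy inputs are assumptions for illustration only; the exact contract of embed_spatial and the tokenizer should be checked against the repository.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical local path; replace with the actual checkpoint location or repo id.
model_path = "./baichuan2_7b_docllm"

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True).eval()

inputs = tokenizer("Invoice number: 12345", return_tensors="pt")
# Assumed layout: one (x1, y1, x2, y2) box per token, normalized to [0, 1]; dummy zeros here.
coords = torch.zeros(1, inputs["input_ids"].shape[1], 4)

with torch.no_grad():
    out = model(input_ids=inputs["input_ids"], input_coordinates=coords)
print(out.logits.shape)  # (batch, seq_len, vocab_size)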
quantizer.py ADDED
@@ -0,0 +1,210 @@
1
+ import bitsandbytes as bnb
2
+ from bitsandbytes.nn.modules import Params4bit, Int8Params
3
+ import torch
4
+
5
+ def Params4bitCuda(self, device):
6
+ self.data = self.data.cuda(device)
7
+ self.quant_state[0] = self.quant_state[0].cuda(device)
8
+ self.quant_state[4][0] = self.quant_state[4][0].cuda(device)
9
+ self.quant_state[4][1][0] = self.quant_state[4][1][0].cuda(device)
10
+ self.quant_state[4][1][1] = self.quant_state[4][1][1].cuda(device)
11
+
12
+ self.quant_state[6] = self.quant_state[6].cuda(device)
13
+ return self
14
+
15
+ class Linear4bitOnline(torch.nn.Module):
16
+ def __init__(self, weight, bias, quant_type):
17
+ super().__init__()
18
+ self.weight = Params4bit(
19
+ weight.data, requires_grad=False, compress_statistics=True, quant_type=quant_type
20
+ )
21
+ self.compute_dtype = None
22
+ #self.weight.cuda(weight.device)
23
+ self.bias = bias
24
+
25
+ def forward(self, x: torch.Tensor):
26
+ # weights are cast automatically as Int8Params, but the bias has to be cast manually
27
+ if self.bias is not None and self.bias.dtype != x.dtype:
28
+ self.bias.data = self.bias.data.to(x.dtype)
29
+
30
+ if getattr(self.weight, "quant_state", None) is None:
31
+ print(
32
+ "FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first."
33
+ )
34
+ inp_dtype = x.dtype
35
+ if self.compute_dtype is not None:
36
+ x = x.to(self.compute_dtype)
37
+
38
+ bias = None if self.bias is None else self.bias.to(self.compute_dtype)
39
+ out = bnb.matmul_4bit(
40
+ x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state
41
+ )
42
+
43
+ out = out.to(inp_dtype)
44
+
45
+ return out
46
+
47
+ class Linear8bitLtOnline(torch.nn.Module):
48
+ def __init__(
49
+ self,
50
+ weight,
51
+ bias,
52
+ has_fp16_weights=True,
53
+ memory_efficient_backward=False,
54
+ threshold=0.0,
55
+ index=None,
56
+ ):
57
+ super().__init__()
58
+ assert (
59
+ not memory_efficient_backward
60
+ ), "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
61
+ self.state = bnb.MatmulLtState()
62
+ self.index = index
63
+
64
+ # Necessary for stacked layers
65
+ self.state.threshold = threshold
66
+ self.state.has_fp16_weights = has_fp16_weights
67
+ self.state.memory_efficient_backward = memory_efficient_backward
68
+ if threshold > 0.0 and not has_fp16_weights:
69
+ self.state.use_pool = True
70
+
71
+ self.weight = Int8Params(
72
+ weight.data,
73
+ has_fp16_weights=has_fp16_weights,
74
+ requires_grad=has_fp16_weights,
75
+ )
76
+ self.bias = bias
77
+
78
+ def init_8bit_state(self):
79
+ self.state.CB = self.weight.CB
80
+ self.state.SCB = self.weight.SCB
81
+ self.weight.CB = None
82
+ self.weight.SCB = None
83
+
84
+ def forward(self, x: torch.Tensor):
85
+ self.state.is_training = self.training
86
+ if self.weight.CB is not None:
87
+ self.init_8bit_state()
88
+
89
+ # weights are cast automatically as Int8Params, but the bias has to be cast manually
90
+ if self.bias is not None and self.bias.dtype != x.dtype:
91
+ self.bias.data = self.bias.data.to(x.dtype)
92
+
93
+ out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)
94
+
95
+ if not self.state.has_fp16_weights:
96
+ if self.state.CB is not None and self.state.CxB is not None:
97
+ # we converted 8-bit row major to turing/ampere format in the first inference pass
98
+ # we no longer need the row-major weight
99
+ del self.state.CB
100
+ self.weight.data = self.state.CxB
101
+ return out
102
+
103
+ def quantize_offline(model, bits: int):
104
+ assert (bits == 4), f'bits: {bits} is not supported'
105
+
106
+ for i, layer in enumerate(model.model.layers):
107
+ layer.self_attn.W_pack = bnb.nn.Linear4bit(
108
+ layer.self_attn.W_pack.weight.shape[1],
109
+ layer.self_attn.W_pack.weight.shape[0],
110
+ False,
111
+ torch.float16,
112
+ compress_statistics=True,
113
+ quant_type="nf4",
114
+ )
115
+ layer.self_attn.o_proj = bnb.nn.Linear4bit(
116
+ layer.self_attn.o_proj.weight.shape[1],
117
+ layer.self_attn.o_proj.weight.shape[0],
118
+ False,
119
+ torch.float16,
120
+ compress_statistics=True,
121
+ quant_type="nf4",
122
+ )
123
+
124
+ layer.mlp.gate_proj = bnb.nn.Linear4bit(
125
+ layer.mlp.gate_proj.weight.shape[1],
126
+ layer.mlp.gate_proj.weight.shape[0],
127
+ False,
128
+ torch.float16,
129
+ compress_statistics=True,
130
+ quant_type="nf4",
131
+ )
132
+ layer.mlp.down_proj = bnb.nn.Linear4bit(
133
+ layer.mlp.down_proj.weight.shape[1],
134
+ layer.mlp.down_proj.weight.shape[0],
135
+ False,
136
+ torch.float16,
137
+ compress_statistics=True,
138
+ quant_type="nf4",
139
+ )
140
+ layer.mlp.up_proj = bnb.nn.Linear4bit(
141
+ layer.mlp.up_proj.weight.shape[1],
142
+ layer.mlp.up_proj.weight.shape[0],
143
+ False,
144
+ torch.float16,
145
+ compress_statistics=True,
146
+ quant_type="nf4",
147
+ )
148
+ return model
149
+
150
+ def quantize_online(model, bits: int):
151
+ def quant(weight, bias=None):
152
+ if bits == 8:
153
+ linear = Linear8bitLtOnline(
154
+ weight,
155
+ bias,
156
+ has_fp16_weights=False,
157
+ threshold=6.0,
158
+ )
159
+ if bias is not None:
160
+ linear.bias = torch.nn.Parameter(bias)
161
+ elif bits == 4:
162
+ linear = Linear4bitOnline(
163
+ weight,
164
+ bias,
165
+ quant_type="nf4", #fp4/nf4
166
+ )
167
+ else:
168
+ raise ValueError("quantize only support 4/8 bit")
169
+ return linear
170
+
171
+ for i, layer in enumerate(model.model.layers):
172
+ layer.self_attn.W_pack = quant(layer.self_attn.W_pack.weight)
173
+ layer.self_attn.o_proj = quant(layer.self_attn.o_proj.weight)
174
+ layer.mlp.gate_proj = quant(layer.mlp.gate_proj.weight)
175
+ layer.mlp.down_proj = quant(layer.mlp.down_proj.weight)
176
+ layer.mlp.up_proj = quant(layer.mlp.up_proj.weight)
177
+ return model
178
+
179
+ def init_model_weight_int4(config, model, state_dict):
180
+ #replace Params4bit.cuda with Params4bitCuda
181
+ Params4bit.cuda = Params4bitCuda
182
+
183
+ for i in range(config.num_hidden_layers):
184
+ weight_data = state_dict[f'model.layers.{i}.self_attn.W_pack.weight.data']
185
+ weight_quant_state = state_dict[f'model.layers.{i}.self_attn.W_pack.weight.quant_state']
186
+ model.model.layers[i].self_attn.W_pack.weight = Params4bit(weight_data, requires_grad=False, quant_state=weight_quant_state)
187
+
188
+ weight_data = state_dict[f'model.layers.{i}.self_attn.o_proj.weight.data']
189
+ weight_quant_state = state_dict[f'model.layers.{i}.self_attn.o_proj.weight.quant_state']
190
+ model.model.layers[i].self_attn.o_proj.weight = Params4bit(weight_data, requires_grad=False, quant_state=weight_quant_state)
191
+
192
+ weight_data = state_dict[f'model.layers.{i}.mlp.gate_proj.weight.data']
193
+ weight_quant_state = state_dict[f'model.layers.{i}.mlp.gate_proj.weight.quant_state']
194
+ model.model.layers[i].mlp.gate_proj.weight = Params4bit(weight_data, requires_grad=False, quant_state=weight_quant_state)
195
+
196
+ weight_data = state_dict[f'model.layers.{i}.mlp.up_proj.weight.data']
197
+ weight_quant_state = state_dict[f'model.layers.{i}.mlp.up_proj.weight.quant_state']
198
+ model.model.layers[i].mlp.up_proj.weight = Params4bit(weight_data, requires_grad=False, quant_state=weight_quant_state)
199
+
200
+ weight_data = state_dict[f'model.layers.{i}.mlp.down_proj.weight.data']
201
+ weight_quant_state = state_dict[f'model.layers.{i}.mlp.down_proj.weight.quant_state']
202
+ model.model.layers[i].mlp.down_proj.weight = Params4bit(weight_data, requires_grad=False, quant_state=weight_quant_state)
203
+
204
+ model.model.layers[i].input_layernorm.weight = state_dict[f'model.layers.{i}.input_layernorm.weight']
205
+ model.model.layers[i].post_attention_layernorm.weight = state_dict[f'model.layers.{i}.post_attention_layernorm.weight']
206
+
207
+ model.model.embed_tokens.weight = state_dict['model.embed_tokens.weight']
208
+ model.model.norm.weight = state_dict['model.norm.weight']
209
+ model.lm_head.weight = state_dict['lm_head.weight']
210
+ return model
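A hedged sketch of how the two quantization paths added in quantizer.py are typically driven: quantize_offline and init_model_weight_int4 run inside BaichuanForCausalLM.from_pretrained when the config carries quantization_config['load_in_4bit'], while quantize_online is exposed through model.quantize(bits). The checkpoint path below is a placeholder, and a CUDA build of bitsandbytes is assumed.

import torch
from transformers import AutoModelForCausalLM

# Placeholder path; point this at a full-precision (fp16) checkpoint of this model.
model = AutoModelForCausalLM.from_pretrained(
    "./baichuan2_7b_docllm", trust_remote_code=True, torch_dtype=torch.float16
)

# Online path: model.quantize() replaces W_pack / o_proj / gate_proj / down_proj / up_proj
# with the bitsandbytes wrappers defined above; the weights are quantized when moved to GPU.
model = model.quantize(8).cuda()  # or model.quantize(4) for nf4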