shunxing1234 committed 1fe84ee (1 parent: 3554f56)

Update README.md

Files changed (1): README.md (+100 -24)
README.md CHANGED
@@ -61,33 +61,109 @@ with torch.no_grad():
  print(out)
  ```

- Inference with NBCE
+ Inference with [NBCE](https://github.com/bojone/NBCE/tree/main)

  ```python
- from transformers import AutoTokenizer, AutoModelForCausalLM
+ import json
  import torch
- from cyg_conversation import covert_prompt_to_input_ids_with_history
-
- tokenizer = AutoTokenizer.from_pretrained("BAAI/AquilaChat-7B")
- model = AutoModelForCausalLM.from_pretrained("BAAI/AquilaChat-7B")
- model.eval()
- model.to("cuda:0")
- vocab = tokenizer.vocab
- print(len(vocab))
-
- text = "请给出10个要到北京旅游的理由。"  # "Give ten reasons to visit Beijing."
-
- tokens = covert_prompt_to_input_ids_with_history(text, history=[], tokenizer=tokenizer, max_token=512)
-
- tokens = torch.tensor(tokens)[None,].to("cuda:0")
-
- with torch.no_grad():
-     out = model.generate(tokens, do_sample=True, max_length=512, eos_token_id=100007)[0]
-
-     out = tokenizer.decode(out.cpu().numpy().tolist())
-
-     print(out)
+ from transformers import AutoTokenizer
+ from transformers import AutoModelForCausalLM
+ from transformers import TopPLogitsWarper, LogitsProcessorList
+
+ model_path = "BAAI/AquilaChat-7B"  # path or hub id of the AquilaChat checkpoint
+
+ # Load the tokenizer
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
+ tokenizer.padding_side = 'left'
+ tokenizer.pad_token = tokenizer.unk_token
+
+ # Load the Aquila model
+ model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16)
+ device = torch.device('cuda')
+ model.to(device)
+
+ # Load the example contexts
+ from cyg_conversation import default_conversation
+
+ conv = default_conversation.copy()
+ contexts = json.load(open('code_text_2.json'))
+
+ question = "请解释这段程序的功能:"  # "Please explain what this program does:"
+ batch = []
+ conv.append_message(conv.roles[0], question)
+ conv.append_message(conv.roles[1], None)
+ batch.append(conv.get_prompt())
+
+ # Concatenate each context with the question
+ for ci, context in enumerate(contexts):
+     conv1 = default_conversation.copy()
+     conv1.append_message(conv1.roles[0], context + question)
+     conv1.append_message(conv1.roles[1], None)
+     batch.append(conv1.get_prompt())
+ print('Context length distribution:', [len(text) for text in batch])
+ print('Total context length:', sum([len(text) for text in batch]))
+
+ # Top-p truncation
+ processors = LogitsProcessorList()
+ processors.append(TopPLogitsWarper(0.95))
+
+ # Copied from https://github.com/bojone/NBCE/blob/main/test.py#L51-L106
+ @torch.inference_mode()
+ def generate(max_tokens):
+     """Naive Bayes-based Context Extension (NBCE) demo."""
+     inputs = tokenizer(batch, padding='longest', return_tensors='pt').to(device)
+     input_ids = inputs.input_ids
+     attention_mask = inputs.attention_mask
+
+     print('input_ids', input_ids.shape)
+     past_key_values = None
+     n = input_ids.shape[0]
+
+     for i in range(max_tokens):
+         # Model forward pass (with KV cache)
+         outputs = model(input_ids=input_ids,
+                         attention_mask=attention_mask,
+                         return_dict=True,
+                         use_cache=True,
+                         past_key_values=past_key_values)
+         past_key_values = outputs.past_key_values
+
+         # ===== Core NBCE code begins =====
+         beta, eta = 0.25, 0.1
+         logits = outputs.logits[:, -1]
+         logits = logits - logits.logsumexp(dim=-1, keepdim=True)
+         logits = processors(input_ids, logits)
+         entropy = -(logits.exp() * logits.clip(-100, 0)).sum(dim=-1)
+         if i > 0:
+             entropy[k] -= eta
+         k = entropy[1:].argmin() + 1
+         logits_max = logits[k]
+         logits_uncond = logits[0]
+         logits_merged = (1 + beta) * logits_max - beta * logits_uncond
+         logits = torch.where(logits_uncond > -100, logits_merged, logits_max)
+         # ===== Core NBCE code ends =====
+
+         # Build the sampling distribution
+         # tau = 1 is standard random sampling; tau -> 0 approaches greedy search
+         # For simplicity, no top-k/top-p truncation is applied here
+         tau = 0.01
+         probas = torch.nn.functional.softmax(logits[None] / tau, dim=-1)
+         next_tokens = torch.multinomial(probas, num_samples=1).squeeze(1)
+         if next_tokens[0] == tokenizer.eos_token_id:
+             break
+
+         ret = tokenizer.batch_decode(next_tokens)
+         print(ret[0], flush=True, end='')
+
+         # Prepare for the next iteration
+         input_ids = next_tokens.unsqueeze(-1).tile(n, 1)
+         attention_mask = torch.cat([attention_mask, torch.ones(n, 1, dtype=torch.long, device=device)], dim=-1)
+
+
+ if __name__ == '__main__':
+     generate(1000)
  ```
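
The core block above implements the naive Bayes identity behind NBCE (see the linked repo and write-up). A sketch of the derivation, writing $T$ for the continuation and $S_1,\dots,S_n$ for the contexts, with row 0 of the batch being the context-free prompt:

```latex
% Naive Bayes: assume the contexts are conditionally independent given T, so
% p(T | S_1,...,S_n) \propto p(T) \prod_i p(S_i | T) \propto p(T)^{1-n} \prod_i p(T | S_i)
\log p(T \mid S_1,\dots,S_n)
  = \sum_{i=1}^{n} \log p(T \mid S_i) \;-\; (n-1)\,\log p(T) \;+\; \text{const}.
```

The code generalizes the factor $(n-1)$ to a tunable $\beta$ and replaces the sum over contexts with a pooling step: `logits_max` is the context row with minimum entropy (nudged by `eta` so it tends to stick with the previous choice), `logits_uncond` is row 0, and `logits_merged = (1 + beta) * logits_max - beta * logits_uncond` is the NBCE combination.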
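
One practical note: `code_text_2.json` is an external file referenced by the snippet; the batch-building loop only assumes it parses to a JSON array of context strings. A minimal, hypothetical stand-in could be created like this:

```python
import json

# Hypothetical stand-in for code_text_2.json: the loop above only
# requires a JSON array of strings, one per context window.
contexts = [
    "def fib(n):\n    return n if n < 2 else fib(n - 1) + fib(n - 2)\n",
    "for i in range(10):\n    print(fib(i))\n",
]
with open('code_text_2.json', 'w', encoding='utf-8') as f:
    json.dump(contexts, f, ensure_ascii=False)
```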