from transformers import AutoTokenizer, AutoModel

# Load the tokenizer and the INT4-quantized ChatGLM-6B model from a local directory.
# trust_remote_code=True is required because ChatGLM ships its own modeling code.
tokenizer = AutoTokenizer.from_pretrained(".\\models\\chatglm-6b-int4", trust_remote_code=True, revision="")
model = AutoModel.from_pretrained(".\\models\\chatglm-6b-int4", trust_remote_code=True, revision="").half().cuda()

# Use a pre-compiled quantization kernel instead of compiling one at load time.
kernel_file = ".\\models\\chatglm-6b-int4\\quantization_kernels.so"
model = model.quantize(bits=4, kernel_file=kernel_file)
model = model.eval()

# Run a single-turn chat ("你好" means "Hello") and print the reply.
response, history = model.chat(tokenizer, "你好", history=[])
print(response)
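
# A minimal sketch of a follow-up turn, assuming the same chat API used above:
# model.chat returns the accumulated history, which can be passed back in so the
# model keeps the conversation context. The follow-up prompt is illustrative only.
follow_up, history = model.chat(tokenizer, "Please summarize your last reply in one sentence.", history=history)
print(follow_up)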