# Load the INT4-quantized ChatGLM-6B tokenizer and model from a local directory
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained(".\\models\\chatglm-6b-int4", trust_remote_code=True, revision="")
model = AutoModel.from_pretrained(".\\models\\chatglm-6b-int4", trust_remote_code=True, revision="").half().cuda()

# Point to the locally compiled quantization kernels and quantize the model with them
kernel_file = ".\\models\\chatglm-6b-int4\\quantization_kernels.so"
model = model.quantize(bits=4, kernel_file=kernel_file)
model = model.eval()

# Single-turn chat; the prompt "你好" means "Hello"
response, history = model.chat(tokenizer, "你好", history=[])
print(response)
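The `history` returned by `model.chat` can be fed back into the next call to continue the conversation. A minimal sketch of a follow-up turn, assuming the model and tokenizer loaded above; the second prompt is only an illustrative example, not from the original:

# Follow-up turn: reuse the history from the previous call so the model keeps context
response, history = model.chat(tokenizer, "Please introduce yourself in one sentence.", history=history)
print(response)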