chatglm-6b-int4 / demo_call_cpu_model.py
NewBreaker's picture
auto git
3f712ba
raw
history blame contribute delete
478 Bytes
# Demo: load the ChatGLM-6B int4 checkpoint from the current directory on CPU
# and re-save the tokenizer and model weights under ./models.
from transformers import AutoTokenizer, AutoModel

# Compiled CPU quantization kernels, consumed by the (optional) quantize step
# below. Forward slashes are portable — they work on Windows and POSIX alike.
kernel_file = "./models/quantization_kernels.so"

# trust_remote_code=True is required because ChatGLM ships its own modeling
# code with the checkpoint. ".": load from the script's working directory
# (portable, unlike the Windows-only ".\\" form).
tokenizer = AutoTokenizer.from_pretrained(".", trust_remote_code=True)
# .float() casts to fp32 so the model can run on CPU.
model = AutoModel.from_pretrained(".", trust_remote_code=True).float()

# Optional: apply int4 quantization with the custom CPU kernels and run a
# single smoke-test chat turn. Left disabled in this demo.
# model = model.quantize(bits=4, kernel_file=kernel_file)
# response, history = model.chat(tokenizer, "你好", history=[])
# print("response:", response)

# Persist both artifacts to ./models for later offline loading.
tokenizer.save_pretrained("models")
model.save_pretrained("models")