from transformers import AutoTokenizer, AutoModel
import gradio as gr

# Load the INT4-quantized ChatGLM-6B from a local directory and run it on the GPU in fp16.
# (The original chained .cuda().cpu(), which moved the model to the GPU and straight back;
# for GPU inference only .cuda() is needed.)
tokenizer = AutoTokenizer.from_pretrained(".\\models\\chatglm-6b-int4", trust_remote_code=True, revision="")
model = AutoModel.from_pretrained(".\\models\\chatglm-6b-int4", trust_remote_code=True, revision="").half().cuda()

# Alternative: download the full-precision model from the Hugging Face Hub.
# tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
# model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
# model = model.eval()

# Alternative: load the local INT4 model and quantize with a custom kernel.
# kernel_file = "./models/chatglm-6b-int4/quantization_kernels.so"
# tokenizer = AutoTokenizer.from_pretrained("./models/chatglm-6b-int4", trust_remote_code=True, revision="")
# model = AutoModel.from_pretrained("./models/chatglm-6b-int4", trust_remote_code=True, revision="").half().cuda()
# model = AutoModel.from_pretrained("./models/chatglm-6b-int4", trust_remote_code=True, revision="").half()
# model = model.quantize(bits=model_args.quantization_bit, kernel_file=kernel_file)

model = model.eval()


def chat(msg):
    # A fresh history is created on every call, so the bot has no memory between turns.
    history = []
    response, history = model.chat(tokenizer, msg, history=history)
    print("response:", response)
    return response


iface = gr.Interface(fn=chat, inputs="text", outputs="text")
iface.launch()
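
# A minimal sketch, assuming a Gradio version that supports gr.State with
# gr.Interface: carry the conversation history across turns so the model can
# use context from earlier messages. Swap this in for the stateless chat() above.
# def chat_with_history(msg, history):
#     history = history or []  # gr.State starts out as None on the first call
#     response, history = model.chat(tokenizer, msg, history=history)
#     return response, history
#
# iface = gr.Interface(fn=chat_with_history,
#                      inputs=["text", gr.State()],
#                      outputs=["text", gr.State()])
# iface.launch()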