Spaces:
Runtime error
Runtime error
File size: 1,191 Bytes
47b54c6 a78bf18 d498a70 a78bf18 47b54c6 a78bf18 47b54c6 a78bf18 47b54c6 a78bf18 47b54c6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 |
from transformers import AutoTokenizer, AutoModel
import gradio as gr
# --- Model loading (module-level side effect: downloads/loads ChatGLM weights) ---
# Alternative config: local chatglm-6b-int4 on CUDA (original note: ran successfully locally).
# tokenizer = AutoTokenizer.from_pretrained(".\\models\\chatglm-6b-int4", trust_remote_code=True, revision="")
# model = AutoModel.from_pretrained(".\\models\\chatglm-6b-int4", trust_remote_code=True, revision="").half().cuda()
# Active config: chatglm-6b from the Hugging Face Hub, CPU.
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
# NOTE(review): .half() casts weights to fp16, but many PyTorch CPU ops lack
# fp16 kernels — confirm this actually runs on CPU; .float() is the usual choice there.
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half()
# Alternative config: local chatglm-6b-int4 with custom quantization kernels.
# kernel_file = "./models/chatglm-6b-int4/quantization_kernels.so"
# tokenizer = AutoTokenizer.from_pretrained("./models/chatglm-6b-int4", trust_remote_code=True, revision="")
# model = AutoModel.from_pretrained("./models/chatglm-6b-int4", trust_remote_code=True, revision="").half().cuda()
# model = model.quantize(bits=model_args.quantization_bit, kernel_file=kernel_file)
model = model.eval()  # inference mode (disables dropout etc.)
def chat(msg, history=None):
    """Generate a reply to *msg* using the module-level ChatGLM model.

    Parameters
    ----------
    msg : str
        The user message to send to the model.
    history : list | None
        Optional prior conversation turns in the format expected by
        ``model.chat``. Defaults to ``None`` (a fresh list per call), so
        each call is stateless — same behavior as before, but callers may
        now supply context. A ``None`` sentinel avoids the shared
        mutable-default pitfall.

    Returns
    -------
    str
        The model's response text.
    """
    if history is None:
        history = []
    response, history = model.chat(tokenizer, msg, history=history)
    print("response:", response)  # console trace for debugging
    return response
# Expose the chat function through a minimal text-in / text-out Gradio UI,
# then start the local web server (blocks until shut down).
iface = gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
)
iface.launch()
|