from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html

# Full chatglm-6b in FP16 on GPU:
# tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
# model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()

# chatglm-6b-int4 on CUDA (runs successfully locally):
# tokenizer = AutoTokenizer.from_pretrained(".\\models\\chatglm-6b-int4", trust_remote_code=True, revision="")
# model = AutoModel.from_pretrained(".\\models\\chatglm-6b-int4", trust_remote_code=True, revision="").half().cuda()

# chatglm-6b-int4 on CPU:
tokenizer = AutoTokenizer.from_pretrained("models/chatglm-6b-int4", trust_remote_code=True, revision="")
model = AutoModel.from_pretrained("models/chatglm-6b-int4", trust_remote_code=True, revision="").float()

# Variant using a custom quantization kernel:
# kernel_file = "./models/chatglm-6b-int4/quantization_kernels.so"
# tokenizer = AutoTokenizer.from_pretrained("./models/chatglm-6b-int4", trust_remote_code=True, revision="")
# model = AutoModel.from_pretrained("./models/chatglm-6b-int4", trust_remote_code=True, revision="").half().cuda()
# model = AutoModel.from_pretrained("./models/chatglm-6b-int4", trust_remote_code=True, revision="").float()
# model = model.quantize(bits=model_args.quantization_bit, kernel_file=kernel_file)

model = model.eval()


"""Override Chatbot.postprocess"""


def postprocess(self, y):
    if y is None:
        return []
    for i, (message, response) in enumerate(y):
        y[i] = (
            None if message is None else mdtex2html.convert(message),
            None if response is None else mdtex2html.convert(response),
        )
    return y


gr.Chatbot.postprocess = postprocess


def parse_text(text):
    """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]
    count = 0
    for i, line in enumerate(lines):
        if "```" in line:
            count += 1
            items = line.split('`')
            if count % 2 == 1:
                # Opening ``` fence: start an HTML code block tagged with the language name.
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                # Closing ``` fence: end the HTML code block.
                lines[i] = f'<br></code></pre>'
        else:
            if i > 0:
                if count % 2 == 1:
                    # Inside a code block: escape characters that Markdown/HTML
                    # would otherwise interpret, so they render literally.
                    line = line.replace("`", "\`")
                    line = line.replace("<", "&lt;")
                    line = line.replace(">", "&gt;")
                    line = line.replace(" ", "&nbsp;")
                    line = line.replace("*", "&ast;")
                    line = line.replace("_", "&lowbar;")
                    line = line.replace("-", "&#45;")
                    line = line.replace(".", "&#46;")
                    line = line.replace("!", "&#33;")
                    line = line.replace("(", "&#40;")
                    line = line.replace(")", "&#41;")
                    line = line.replace("$", "&#36;")
                lines[i] = "<br>"+line