chatglm-6b-int4 / demo_app.py
NewBreaker's picture
first
47b54c6
raw
history blame
550 Bytes
from transformers import AutoTokenizer, AutoModel
import gradio as gr
# Load the locally stored ChatGLM-6B-int4 tokenizer and model.
# trust_remote_code=True executes custom Python code shipped with the model
# repo — acceptable here only because the checkpoint is a local copy.
# NOTE(review): Windows-style relative path — assumes the script is launched
# from the directory containing `models\chatglm-6b-int4`; confirm on deploy.
tokenizer = AutoTokenizer.from_pretrained(".\\models\\chatglm-6b-int4", trust_remote_code=True, revision="")
# .half() casts weights to fp16; .cuda() requires an NVIDIA GPU at startup.
model = AutoModel.from_pretrained(".\\models\\chatglm-6b-int4", trust_remote_code=True, revision="").half().cuda()
# Switch to inference mode (disables dropout etc.).
model = model.eval()
def chat(msg, history=None):
    """Generate one model response for *msg*.

    Args:
        msg: The user's input text.
        history: Optional prior conversation turns (list of
            (query, response) pairs as expected by ``model.chat``).
            Defaults to an empty conversation, so each call is stateless —
            this matches the original behavior and the Gradio UI, which
            passes only ``msg``.

    Returns:
        The model's response string.
    """
    # Use None as the default (not []) so no mutable default is shared
    # across calls; callers may now supply real context, backward-compatibly.
    history = [] if history is None else history
    response, history = model.chat(tokenizer, msg, history=history)
    print("response:", response)
    return response
# Minimal Gradio UI: a single text input routed through chat(), text output.
iface = gr.Interface(fn=chat, inputs="text", outputs="text")
# Starts a local web server and blocks until it is stopped.
# NOTE(review): presumably serves on Gradio's default host/port — confirm
# before exposing beyond localhost.
iface.launch()