The model 'BaichuanForCausalLM' is not supported for text-generation.
import torch
from langchain import PromptTemplate, LLMChain
from langchain.llms import HuggingFacePipeline
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
model_id = "baichuan-inc/Baichuan-13B-Chat"
# Baichuan ships custom modeling code, so trust_remote_code=True is required;
# the model card also recommends the slow tokenizer (use_fast=False).
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.float16, trust_remote_code=True)
# Optional: Baichuan's built-in int8 quantization to cut GPU memory usage.
# model = model.quantize(8).cuda()
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=1000,
)
local_llm = HuggingFacePipeline(pipeline=pipe)
print(local_llm("What is the capital of France?"))
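# Note: the "not supported for text-generation" message comes from
# transformers' Pipeline.check_model_type, which compares the model's class
# name against the registry of known causal-LM architectures. Classes loaded
# with trust_remote_code=True, such as BaichuanForCausalLM, are not in that
# registry, so the check logs an error; it does not raise, and generation
# itself usually still runs.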
template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=local_llm)
question = "What is the capital of England?"
print(llm_chain.run(question))
Calling the model through LangChain produces the error: The model 'BaichuanForCausalLM' is not supported for text-generation.
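One possible workaround, sketched below under a few assumptions: skip the transformers pipeline entirely and wrap the already-loaded model and tokenizer in a small custom LangChain LLM, so the task check that prints the message never runs. The class name BaichuanLLM and max_new_tokens=512 are illustrative choices, and the _call signature matches the older langchain API used above (newer releases also pass a run_manager argument, absorbed here by **kwargs).

from typing import List, Optional
from langchain.llms.base import LLM

class BaichuanLLM(LLM):
    # Hypothetical wrapper: calls model.generate directly, so the
    # transformers pipeline task check never runs.

    @property
    def _llm_type(self) -> str:
        return "baichuan-custom"

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs) -> str:
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        outputs = model.generate(**inputs, max_new_tokens=512)
        # Decode only the newly generated tokens, not the echoed prompt.
        return tokenizer.decode(
            outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
        )

local_llm = BaichuanLLM()
llm_chain = LLMChain(prompt=prompt, llm=local_llm)
print(llm_chain.run("What is the capital of England?"))

As a separate sanity check outside LangChain, the model card's own model.chat(tokenizer, messages) interface can confirm that the model generates correctly on its own.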