# chat-your-case / query_data.py
from langchain.prompts.prompt import PromptTemplate
from langchain.chains import ChatVectorDBChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
system_template = """Use the following pieces of context to answer the user's question. Use only the context to answer the question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
{context}"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}")
]
prompt = ChatPromptTemplate.from_messages(messages)
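
# Prompt for the rephrasing step: ChatVectorDBChain first condenses the chat
# history plus the follow-up question into a standalone question, which is then
# used to retrieve documents before the answering prompt above is applied.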
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
You can assume the question about the law case between Review Publishing Company Limited and Lee Hsien Loong in Singapore.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
#template = """You are an AI assistant for answering questions about the law case.
#You are given the following extracted parts of a long document and a question. Provide a conversational answer.
#If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
#If the question is not about law, politely inform them that you are tuned to only answer questions about it.
#Question: {question}
#=========
#{context}
#=========
#Answer in Markdown:"""
#QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
def get_chain(vectorstore):
    """Build the conversational retrieval chain over the given vectorstore."""
    llm = ChatOpenAI(temperature=0)  # temperature 0 for deterministic answers
    qa_chain = ChatVectorDBChain.from_llm(
        llm,
        vectorstore,
        qa_prompt=prompt,
        condense_question_prompt=CONDENSE_QUESTION_PROMPT,
    )
    return qa_chain
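

# Minimal usage sketch (not part of the app): it assumes a vectorstore was built
# elsewhere, e.g. by an ingest step that pickled a FAISS index over the case
# documents. The file name and the sample question below are illustrative only.
if __name__ == "__main__":
    import pickle

    # Hypothetical path to a pickled vectorstore produced by an ingest script.
    with open("vectorstore.pkl", "rb") as f:
        vectorstore = pickle.load(f)

    chain = get_chain(vectorstore)
    # ChatVectorDBChain takes the current question plus prior (question, answer)
    # turns and returns a dict whose "answer" key holds the response.
    chat_history = []
    result = chain({"question": "What was the court's holding?", "chat_history": chat_history})
    print(result["answer"])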