import os

import gradio as gr

from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.vectorstores import Chroma
# The OpenAI key is read from the environment (e.g. a Space secret).
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]

# Load the persisted Chroma vector store and expose it as a retriever.
embedding = OpenAIEmbeddings()
vectorstore = Chroma(persist_directory='vectorstore', embedding_function=embedding)
retriever = vectorstore.as_retriever()
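# The 'vectorstore' directory must already contain a persisted Chroma index.
# A minimal ingestion sketch (not run by this app; the loader, file name, and
# chunk sizes below are assumptions, not part of the original Space):
#
#   from langchain.document_loaders import TextLoader
#   from langchain.text_splitter import CharacterTextSplitter
#
#   docs = TextLoader("form1_history.txt").load()  # hypothetical source file
#   chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
#   db = Chroma.from_documents(chunks, OpenAIEmbeddings(), persist_directory='vectorstore')
#   db.persist()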
aisyah_template = """
Answer each question truthfully using the Malaysian Form 1 History data provided. Your answers should be concise and straight to the point.
For open-ended questions that require subjective judgment or opinion, you may not find a definitive answer in the textbook.
However, you should still address the question's directive based on the data's context. Ideally, your answer should provide 3 points that support your response.
For concepts that are less ethical, you are encouraged to provide positive suggestions instead.
Please keep in mind that the scope of the data provided is limited to the content covered in the Malaysian Form 1 History textbook.
---------------
{context}"""
## If you don't know the answer, just say that you don't know; don't try to make up an answer.

# Alternative generic prompt (not wired into the chain below).
system_template = """Use the following pieces of context to answer the user's question.
----------------
{context}"""
## If you don't know the answer, just say that you don't know; don't try to make up an answer.
messages = [
    SystemMessagePromptTemplate.from_template(aisyah_template),
    HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)

# Retrieval QA chain; qa_prompt injects the Form 1 History instructions above.
qa = ConversationalRetrievalChain.from_llm(
    OpenAI(temperature=0), retriever, return_source_documents=True, qa_prompt=prompt
)
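# Illustrative call (the sample question is hypothetical). With
# return_source_documents=True the chain returns a dict:
#   result = qa({"question": "What is the meaning of history?", "chat_history": []})
#   result["answer"]           -> the model's reply
#   result["source_documents"] -> the retrieved textbook chunks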
def predict(input, chat_history=[]):
    # Append the (question, answer) pair so the Chatbot display and the chain's chat history stay in sync.
    response = qa({"question": input, "chat_history": chat_history})
    chat_history.append((input, response["answer"]))
    return chat_history, chat_history
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])

    with gr.Row():
        # .style(container=False) follows the older Gradio 3.x API this Space was built against.
        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)

    txt.submit(predict, [txt, state], [chatbot, state])
    # txt.submit(agent_executor.run, [txt, state], [chatbot, state])

demo.launch()