Update app.py
Browse files
app.py
CHANGED
@@ -15,8 +15,6 @@ from langchain.chains.conversation.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE
|
|
15 |
|
16 |
from langchain import LLMChain
|
17 |
|
18 |
-
memory = ConversationBufferMemory(memory_key="chat_history")
|
19 |
-
|
20 |
persist_directory="db"
|
21 |
llm=OpenAI(model_name = "text-davinci-003", temperature=0)
|
22 |
model_name = "hkunlp/instructor-large"
|
@@ -24,7 +22,7 @@ embed_instruction = "Represent the text from the BMW website for retrieval"
|
|
24 |
query_instruction = "Query the most relevant text from the BMW website"
|
25 |
embeddings = HuggingFaceInstructEmbeddings(model_name=model_name, embed_instruction=embed_instruction, query_instruction=query_instruction)
|
26 |
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
|
27 |
-
chain = RetrievalQAWithSourcesChain.from_chain_type(llm, chain_type="stuff", retriever=vectordb.as_retriever()
|
28 |
|
29 |
def chat(message, history):
|
30 |
history = history or []
|
|
|
15 |
|
16 |
from langchain import LLMChain
|
17 |
|
|
|
|
|
18 |
persist_directory="db"
|
19 |
llm=OpenAI(model_name = "text-davinci-003", temperature=0)
|
20 |
model_name = "hkunlp/instructor-large"
|
|
|
22 |
query_instruction = "Query the most relevant text from the BMW website"
|
23 |
embeddings = HuggingFaceInstructEmbeddings(model_name=model_name, embed_instruction=embed_instruction, query_instruction=query_instruction)
|
24 |
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
|
25 |
+
chain = RetrievalQAWithSourcesChain.from_chain_type(llm, chain_type="stuff", retriever=vectordb.as_retriever())
|
26 |
|
27 |
def chat(message, history):
|
28 |
history = history or []
|