fahmiaziz98 committed
Commit 84c30b3
1 Parent(s): 38283be
apps/agent/multi_query_chain.py CHANGED
@@ -33,7 +33,5 @@ QUERY_PROMPT = PromptTemplate(
     input_variables=["question"],
     template=template,
 )
-# llm = ChatOpenAI(temperature=0, openai_api_key=OPENAI_API_KEY)
-
 # Chain
 llm_chain = QUERY_PROMPT | llm | output_parser
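This hunk only drops a leftover commented-out ChatOpenAI line; the LCEL chain itself is unchanged. As a rough usage sketch of that chain (the import path, the example question, and the list-of-strings output are assumptions, since the prompt body and output parser are not shown in this hunk):

```python
# Rough sketch, not part of the commit. Assumes the repo is importable as a
# package and that output_parser splits the LLM response into one query per line.
from apps.agent.multi_query_chain import llm_chain

# QUERY_PROMPT declares input_variables=["question"], so the chain is invoked
# with a dict carrying that key; under the assumed parser it returns a list of
# alternative phrasings of the original question.
queries = llm_chain.invoke({"question": "How do I connect WeWeb to a Xano backend?"})
print(queries)
```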
apps/agent/tools.py CHANGED
@@ -16,13 +16,14 @@ compressor = FlashrankRerank(model="ms-marco-MiniLM-L-12-v2")
 def multiquery_retriever(index_name: str, embeddings, compressor) -> ContextualCompressionRetriever:
     vectorstore = Pinecone.from_existing_index(embedding=embeddings, index_name=index_name)
     retriever = vectorstore.as_retriever()
-    reranker_retriever = ContextualCompressionRetriever(
-        base_compressor=compressor, base_retriever=retriever
+
+    multi_retriever = MultiQueryRetriever(
+        retriever=retriever, llm_chain=llm_chain, parser_key="lines"
     )
-    print("Initialize MultiQuery...")
-    return MultiQueryRetriever(
-        retriever=reranker_retriever, llm_chain=llm_chain, parser_key="lines"
+    reranker_retriever = ContextualCompressionRetriever(
+        base_compressor=compressor, base_retriever=multi_retriever
     )
+    return reranker_retriever
 
 retriever_xano = multiquery_retriever(INDEX_NAME_XANO, embeddings, compressor)
 retriever_weweb = multiquery_retriever(INDEX_NAME_WEWEB, embeddings, compressor)
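The reordering matters: before this commit the reranker wrapped only the plain Pinecone retriever and the function returned a MultiQueryRetriever, contradicting its ContextualCompressionRetriever return annotation. After the change, MultiQueryRetriever expands the question first and FlashrankRerank reranks the merged results, so the returned object now matches the annotation. A rough usage sketch of the rebuilt retriever (the import path and the example query are assumptions for illustration):

```python
# Rough sketch, not part of the commit. retriever_xano is the module-level
# retriever built by multiquery_retriever() in apps/agent/tools.py.
from apps.agent.tools import retriever_xano

# Pipeline after this commit:
#   Pinecone retriever -> MultiQueryRetriever (query expansion via llm_chain)
#   -> ContextualCompressionRetriever (FlashrankRerank reranking/compression)
docs = retriever_xano.get_relevant_documents("How do I paginate results from a Xano API?")
for doc in docs:
    print(doc.page_content[:100])
```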