Upload 2 files

- app.py: +1 -7
- requirements.txt: +1 -7
app.py (CHANGED)

@@ -21,7 +21,7 @@ from langchain_community.vectorstores import FAISS
 from langchain_openai.embeddings import OpenAIEmbeddings
 from langchain_core.documents import Document
 from dotenv import load_dotenv
-from langchain.chains import RetrievalQA
+# from langchain.chains import RetrievalQA

 load_dotenv()

@@ -141,12 +141,6 @@ async def on_chat_start():
         temperature=0
     )

-    qa_chain = RetrievalQA(
-        retriever=finetune_retriever,
-        llm=rag_llm,
-        system_prompt=system_role_prompt,
-        user_prompt=user_role_prompt
-    )
-
     finetune_rag_chain = (
         {"context": itemgetter("question") | finetune_retriever, "question": itemgetter("question")}
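The removed qa_chain block passed llm, system_prompt, and user_prompt keyword arguments that RetrievalQA does not take directly (the class is normally built via RetrievalQA.from_chain_type), so this commit drops it, comments out the now-unused import, and keeps only the LCEL pipeline that follows. The hunk is truncated after the context/question mapping; below is a minimal sketch of how such a chain is typically completed. The prompt text, model name, placeholder retriever, and StrOutputParser step are assumptions for illustration, not code taken from app.py.

# Sketch only: completes the truncated LCEL chain from the hunk above.
# rag_prompt, the model name, and the placeholder retriever are assumptions;
# in app.py the real finetune_retriever and rag_llm are defined earlier.
from operator import itemgetter

from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI

# Stand-in for the FAISS-backed finetune_retriever built earlier in app.py.
finetune_retriever = RunnableLambda(
    lambda question: [Document(page_content="placeholder context chunk")]
)

rag_llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # model name assumed

rag_prompt = ChatPromptTemplate.from_messages([
    ("system", "Answer using only the provided context."),
    ("user", "Context:\n{context}\n\nQuestion:\n{question}"),
])

finetune_rag_chain = (
    {"context": itemgetter("question") | finetune_retriever, "question": itemgetter("question")}
    | rag_prompt
    | rag_llm
    | StrOutputParser()
)

# answer = finetune_rag_chain.invoke({"question": "What does the uploaded PDF cover?"})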
requirements.txt (CHANGED)

@@ -1,10 +1,5 @@
 numpy
 chainlit==0.7.700
-# openai
-# langchain_community
-# langchain_experimental
-# langchain_openai
-# langchain_huggingface
 langchain-core==0.2.40
 langchain-openai==0.1.25
 langchain-huggingface==0.0.3
@@ -12,5 +7,4 @@ pypdf
 sentence_transformers
 langchain_text_splitters
 langchain-community
-faiss-cpu
-# langchain
+faiss-cpu
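faiss-cpu remains in the trimmed requirements.txt because app.py imports FAISS from langchain_community.vectorstores, as the first hunk header above shows. Below is a minimal sketch of how a FAISS-backed retriever comparable to finetune_retriever might be assembled; the document contents, embedding model, and k value are assumptions, not values from this Space.

# Sketch only: illustrates why faiss-cpu is still required.
from dotenv import load_dotenv
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai.embeddings import OpenAIEmbeddings

load_dotenv()  # expects OPENAI_API_KEY, as app.py does

docs = [
    Document(page_content="Placeholder chunk 1 from the source PDF."),
    Document(page_content="Placeholder chunk 2 from the source PDF."),
]

embeddings = OpenAIEmbeddings(model="text-embedding-3-small")  # assumed embedding model
vectorstore = FAISS.from_documents(docs, embeddings)

# A retriever comparable to the finetune_retriever used in the chain sketch above.
finetune_retriever = vectorstore.as_retriever(search_kwargs={"k": 2})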