Duplicate from OrganizedProgrammers/PDFAISS-2.3.1
Co-authored-by: Alma atla <[email protected]>
- .gitattributes +34 -0
- README.md +13 -0
- app.py +355 -0
- requirements.txt +8 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
---
title: PDFAISS 2.3
emoji: 🏃
colorFrom: red
colorTo: green
sdk: gradio
sdk_version: 3.29.0
app_file: app.py
pinned: false
duplicated_from: OrganizedProgrammers/PDFAISS-2.3.1
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,355 @@
import langchain
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import UnstructuredPDFLoader,UnstructuredWordDocumentLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import FAISS
from langchain import HuggingFaceHub
from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from zipfile import ZipFile
import gradio as gr
import openpyxl
import os
import shutil
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
import tiktoken
import secrets
import openai
import time

tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")

# create the length function
def tiktoken_len(text):
    tokens = tokenizer.encode(
        text,
        disallowed_special=()
    )
    return len(tokens)

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=600,
    chunk_overlap=200,
    length_function=tiktoken_len,
    separators=["\n\n", "\n", " ", ""]
)

embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
foo = Document(page_content='foo is fou!',metadata={"source":'foo source'})

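# Delete the per-session FAISS folder and its zipped copy so the user can start from a clean database.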
def reset_database(ui_session_id):
    session_id = f"PDFAISS-{ui_session_id}"
    if 'drive' in session_id:
        print("RESET DATABASE: session_id contains 'drive' !!")
        return None

    try:
        shutil.rmtree(session_id)
    except:
        print(f'no {session_id} directory present')

    try:
        os.remove(f"{session_id}.zip")
    except:
        print(f"no {session_id}.zip present")

    return None

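# Heuristic duplicate check: sum the best similarity_search_with_score results for the first
# few chunks; a total below 0.1 is treated as "document already indexed".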
def is_duplicate(split_docs,db):
    epsilon=0.0
    print(f"DUPLICATE: Treating: {split_docs[0].metadata['source'].split('/')[-1]}")
    for i in range(min(3,len(split_docs))):
        query = split_docs[i].page_content
        docs = db.similarity_search_with_score(query,k=1)
        _ , score = docs[0]
        epsilon += score
    print(f"DUPLICATE: epsilon: {epsilon}")
    return epsilon < 0.1

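# Embed the split chunks in batches of 10 and merge each batch into the session FAISS index,
# skipping the whole document if it looks like a duplicate.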
def merge_split_docs_to_db(split_docs,db,progress,progress_step=0.1):
    progress(progress_step,desc="merging docs")
    if len(split_docs)==0:
        print("MERGE to db: NO docs!!")
        return

    filename = split_docs[0].metadata['source']
    if is_duplicate(split_docs,db):
        print(f"MERGE: Document is duplicated: {filename}")
        return
    print(f"MERGE: number of split docs: {len(split_docs)}")
    batch = 10
    for i in range(0, len(split_docs), batch):
        progress(i/len(split_docs),desc=f"added {i} chunks of {len(split_docs)} chunks")
        db1 = FAISS.from_documents(split_docs[i:i+batch], embeddings)
        db.merge_from(db1)
    return db

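# Per-format loaders: each one extracts text with the matching Unstructured loader (or a plain
# read for .txt), tags the chunks with the file name as source, and hands them to merge_split_docs_to_db.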
def merge_pdf_to_db(filename,db,progress,progress_step=0.1):
    progress_step+=0.05
    progress(progress_step,'unpacking pdf')
    doc = UnstructuredPDFLoader(filename).load()
    doc[0].metadata['source'] = filename.split('/')[-1]
    split_docs = text_splitter.split_documents(doc)
    progress_step+=0.3
    progress(progress_step,'pdf unpacked')
    return merge_split_docs_to_db(split_docs,db,progress,progress_step)

def merge_docx_to_db(filename,db,progress,progress_step=0.1):
    progress_step+=0.05
    progress(progress_step,'unpacking docx')
    doc = UnstructuredWordDocumentLoader(filename).load()
    doc[0].metadata['source'] = filename.split('/')[-1]
    split_docs = text_splitter.split_documents(doc)
    progress_step+=0.3
    progress(progress_step,'docx unpacked')
    return merge_split_docs_to_db(split_docs,db,progress,progress_step)

def merge_txt_to_db(filename,db,progress,progress_step=0.1):
    progress_step+=0.05
    progress(progress_step,'unpacking txt')
    with open(filename) as f:
        docs = text_splitter.split_text(f.read())
    split_docs = [Document(page_content=doc,metadata={'source':filename.split('/')[-1]}) for doc in docs]
    progress_step+=0.3
    progress(progress_step,'txt unpacked')
    return merge_split_docs_to_db(split_docs,db,progress,progress_step)

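# A zip upload is either a previously exported database (contains index.faiss, merged directly)
# or a bundle of documents (each supported file is indexed in turn).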
def unpack_zip_file(filename,db,progress):
    with ZipFile(filename, 'r') as zipObj:
        contents = zipObj.namelist()
    print(f"unpack zip: contents: {contents}")
    tmp_directory = filename.split('/')[-1].split('.')[-2]
    shutil.unpack_archive(filename, tmp_directory)

    if 'index.faiss' in [item.lower() for item in contents]:
        db2 = FAISS.load_local(tmp_directory, embeddings)
        db.merge_from(db2)
        return db

    for file in contents:
        if file.lower().endswith('.docx'):
            db = merge_docx_to_db(f"{tmp_directory}/{file}",db,progress)
        if file.lower().endswith('.pdf'):
            db = merge_pdf_to_db(f"{tmp_directory}/{file}",db,progress)
        if file.lower().endswith('.txt'):
            db = merge_txt_to_db(f"{tmp_directory}/{file}",db,progress)
    return db

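# Zip the whole session directory (FAISS index plus stored source files) for download.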
def add_files_to_zip(session_id):
    zip_file_name = f"{session_id}.zip"
    with ZipFile(zip_file_name, "w") as zipObj:
        for root, dirs, files in os.walk(session_id):
            for file_name in files:
                file_path = os.path.join(root, file_name)
                arcname = os.path.relpath(file_path, session_id)
                zipObj.write(file_path, arcname)

## Summary functions ##

## Load each doc from the vector store
def load_docs(ui_session_id):
    session_id_global_db = f"PDFAISS-{ui_session_id}"
    try:
        db = FAISS.load_local(session_id_global_db,embeddings)
        print("load_docs after loading global db:",session_id_global_db,len(db.index_to_docstore_id))
    except:
        return f"SESSION: {session_id_global_db} database does not exist","",""
    docs = []
    for i in range(1,len(db.index_to_docstore_id)):
        docs.append(db.docstore.search(db.index_to_docstore_id[i]))
    return docs


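# Send only the first `first_tokens` tokens of a document to gpt-3.5-turbo and return its summary.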
# summarize with gpt 3.5 turbo
def summarize_gpt(doc,system='provide a summary of the following document: ', first_tokens=600):
    doc = doc.replace('\n\n\n', '').replace('---', '').replace('...', '').replace('___', '')
    encoded = tokenizer.encode(doc)
    print("\n TOKENIZED : ", encoded)
    decoded = tokenizer.decode(encoded[:min(first_tokens, len(encoded))])
    print("\n DOC SHORTEN", min(first_tokens, len(encoded)), " : ", decoded)
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": decoded}
        ]
    )
    return completion.choices[0].message["content"]


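# Walk through the stored chunks, summarize the first chunks of each new source document, and
# yield the accumulated summaries so the UI can stream them; on an API error other than an
# invalid key, wait 20 s and retry once.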
def summarize_docs_generator(apikey_input, session_id):
    openai.api_key = apikey_input
    docs=load_docs(session_id)
    print("################# DOCS LOADED ##################", "docs type : ", type(docs[0]))

    try:
        fail = docs[0].page_content
    except:
        return docs[0]

    source = ""
    summaries = ""
    i = 0
    while i<len(docs):
        doc = docs[i]
        unique_doc = ""
        if source != doc.metadata:
            unique_doc = ''.join([doc.page_content for doc in docs[i:i+3]])
            print("\n\n****Open AI API called****\n\n")
            if i == 0:
                try:
                    summary = summarize_gpt(unique_doc)
                except:
                    return f"ERROR : Try checking the validity of the provided OpenAI API Key"
            else:
                try:
                    summary = summarize_gpt(unique_doc)
                except:
                    print(f"ERROR : There was an error but it is not linked with the validity of api key, taking a 20s nap")
                    yield summaries + f"\n\n °°° OpenAI error, please wait 20 sec of cooldown. °°°"
                    time.sleep(20)
                    summary = summarize_gpt(unique_doc)

            print("SUMMARY : ", summary)
            summaries += f"Source : {doc.metadata['source'].split('/')[-1]}\n{summary} \n\n"
            source = doc.metadata
            yield summaries
        i+=1
    yield summaries


def summarize_docs(apikey_input, session_id):
    gen = summarize_docs_generator(apikey_input, session_id)
    while True:
        try:
            yield str(next(gen))
        except StopIteration:
            return

#### UI Functions ####

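# Gradio handler for the upload button: create or load the session FAISS index, merge every
# uploaded pdf/txt/docx/zip into it, move the originals into the session store, then re-zip
# the session folder and return it for download together with the session id.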
def embed_files(files,ui_session_id,progress=gr.Progress(),progress_step=0.05):
    print(files)
    progress(progress_step,desc="Starting...")
    split_docs=[]
    if len(ui_session_id)==0:
        ui_session_id = secrets.token_urlsafe(16)
    session_id = f"PDFAISS-{ui_session_id}"

    try:
        db = FAISS.load_local(session_id,embeddings)
    except:
        print(f"SESSION: {session_id} database does not exist, create a FAISS db")
        db = FAISS.from_documents([foo], embeddings)
        db.save_local(session_id)
        print(f"SESSION: {session_id} database created")

    print("EMBEDDED, before embedding: ",session_id,len(db.index_to_docstore_id))
    for file_id,file in enumerate(files):
        print("ID : ", file_id,"FILE : ", file)
        file_type = file.name.split('.')[-1].lower()
        source = file.name.split('/')[-1]
        print(f"current file: {source}")
        progress(file_id/len(files),desc=f"Treating {source}")

        if file_type == 'pdf':
            db2 = merge_pdf_to_db(file.name,db,progress)

        if file_type == 'txt':
            db2 = merge_txt_to_db(file.name,db,progress)

        if file_type == 'docx':
            db2 = merge_docx_to_db(file.name,db,progress)

        if file_type == 'zip':
            db2 = unpack_zip_file(file.name,db,progress)

        if db2 != None:
            db = db2
            db.save_local(session_id)
        ### move file to store ###
        progress(progress_step, desc = 'moving file to store')
        directory_path = f"{session_id}/store/"
        if not os.path.exists(directory_path):
            os.makedirs(directory_path)
        try:
            shutil.move(file.name, directory_path)
        except:
            pass

    ### load the updated db and zip it ###
    progress(progress_step, desc = 'loading db')
    db = FAISS.load_local(session_id,embeddings)
    print("EMBEDDED, after embedding: ",session_id,len(db.index_to_docstore_id))
    progress(progress_step, desc = 'zipping db for download')
    add_files_to_zip(session_id)
    print(f"EMBEDDED: db zipped")
    progress(progress_step, desc = 'db zipped')
    return f"{session_id}.zip",ui_session_id

def display_docs(docs):
    output_str = ''
    for i, doc in enumerate(docs):
        source = doc.metadata['source'].split('/')[-1]
        output_str += f"Ref: {i+1}\n{repr(doc.page_content)}\nSource: {source}\n\n"
    return output_str

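# Retrieval-augmented Q&A: fetch the most similar chunks for the query; without an API key only
# the references are shown, otherwise a "stuff" QA chain over gpt-3.5-turbo produces the answer.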
def ask_gpt(query, apikey,history,ui_session_id):
    session_id = f"PDFAISS-{ui_session_id}"
    try:
        db = FAISS.load_local(session_id,embeddings)
        print("ASKGPT after loading",session_id,len(db.index_to_docstore_id))
    except:
        print(f"SESSION: {session_id} database does not exist")
        return f"SESSION: {session_id} database does not exist","",""

    docs = db.similarity_search(query)
    history += f"[query]\n{query}\n[answer]\n"
    if(apikey==""):
        history += f"None\n[references]\n{display_docs(docs)}\n\n"
        return "No answer from GPT", display_docs(docs),history
    else:
        llm = ChatOpenAI(temperature=0, model_name = 'gpt-3.5-turbo', openai_api_key=apikey)
        chain = load_qa_chain(llm, chain_type="stuff")
        answer = chain.run(input_documents=docs, question=query, verbose=True)
        history += f"{answer}\n[references]\n{display_docs(docs)}\n\n"
        return answer,display_docs(docs),history

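# Gradio interface: an API-key accordion plus three tabs (upload/index, summarize, ask),
# wired to the handlers above.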
with gr.Blocks() as demo:
    gr.Markdown("Upload your documents and question them.")
    with gr.Accordion("Open to enter your API key", open=False):
        apikey_input = gr.Textbox(placeholder="Type here your OpenAI API key to use Summarization and Q&A", label="OpenAI API Key",type='password')
    with gr.Tab("Upload PDF & TXT"):
        tb_session_id = gr.Textbox(label='session id')
        docs_input = gr.File(file_count="multiple", file_types=[".txt", ".pdf",".zip",".docx"])
        db_output = gr.outputs.File(label="Download zipped database")
        btn_generate_db = gr.Button("Generate database")
        btn_reset_db = gr.Button("Reset database")

    with gr.Tab("Summarize PDF"):
        with gr.Column():
            summary_output = gr.Textbox(label='Summarized files')
            btn_summary = gr.Button("Summarize")
            summary_output.style(show_copy_button=True)


    with gr.Tab("Ask PDF"):
        with gr.Column():
            query_input = gr.Textbox(placeholder="Type your question", label="Question")
            btn_askGPT = gr.Button("Answer")
            answer_output = gr.Textbox(label='GPT 3.5 answer')
            answer_output.style(show_copy_button=True)
            sources = gr.Textbox(label='Sources')
            sources.style(show_copy_button=True)
            history = gr.Textbox(label='History')
            history.style(show_copy_button=True)

    btn_generate_db.click(embed_files, inputs=[docs_input,tb_session_id], outputs=[db_output,tb_session_id])
    btn_reset_db.click(reset_database,inputs=[tb_session_id],outputs=[db_output])
    btn_summary.click(summarize_docs, inputs=[apikey_input,tb_session_id], outputs=summary_output)
    btn_askGPT.click(ask_gpt, inputs=[query_input,apikey_input,history,tb_session_id], outputs=[answer_output,sources,history])

demo.queue(concurrency_count=10)
demo.launch(debug=False,share=False)
requirements.txt
ADDED
@@ -0,0 +1,8 @@
langchain
huggingface_hub
sentence_transformers
openai
unstructured==0.6.3
tiktoken
faiss-cpu
gradio