wushuangBaOYWHA committed
Commit 3caf952 · 1 Parent(s): 575f89c
Init
Browse files
- README.md +57 -13
- app.py +238 -0
- requirements.txt +7 -0
README.md
CHANGED
@@ -1,13 +1,57 @@
# pdfGPT

### Problem Description
1. When you pass a large text to OpenAI, you run into its 4K-token limit: it cannot take an entire PDF file as input.
2. OpenAI sometimes becomes overly chatty and returns irrelevant responses that are not directly related to your query. This is because OpenAI uses poor embeddings.
3. ChatGPT cannot directly talk to external data.
4. There are a number of similar tools, such as https://www.chatpdf.com, https://www.bespacific.com/chat-with-any-pdf/ and filechat.io, but none of them is open source. Their navigation is also not single-step and to the point, and the answer quality suffers because they use OpenAI embeddings, which are not very good.

### Solution: What is PDF GPT?
1. PDF GPT allows you to chat with an uploaded PDF file using GPT functionalities.
2. The application intelligently breaks the document into smaller chunks and employs a powerful Deep Averaging Network Encoder to generate embeddings.
3. A semantic search is first performed on your PDF content, and the most relevant chunks are passed to OpenAI.
4. Custom logic then generates precise responses. The returned response can even cite, in square brackets ([ ]), the page number where the information is located, which adds credibility and helps you locate the pertinent passage quickly (see the sketch after this list). These responses are much better than the naive OpenAI responses.

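The page-level citations in point 4 come from the way chunks are labelled before retrieval: `text_to_chunks` in `app.py` (added in this commit) prefixes every chunk with its page number. A minimal sketch with invented text:

```python
# Toy illustration; the page text is invented and text_to_chunks is the helper
# defined in app.py in this commit.
pages = ["Transformers use self-attention to weigh the relevance of each token."]
chunks = text_to_chunks(pages, word_length=150, start_page=1)
print(chunks[0])
# [1] "Transformers use self-attention to weigh the relevance of each token."
```

Because every retrieved chunk carries this `[page]` prefix, the prompt can ask the model to cite with `[number]` notation, and the numbers map straight back to PDF pages.
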
### Demo
Demo URL: https://bit.ly/41ZXBJM

**NOTE**: Please star this project if you like it!
### UML
```mermaid
sequenceDiagram
    participant User
    participant System

    User->>System: Enter API Key
    User->>System: Upload PDF/PDF URL
    User->>System: Ask Question
    User->>System: Submit Call to Action

    System->>System: Blank field Validations
    System->>System: Convert PDF to Text
    System->>System: Decompose Text to Chunks (150 word length)
    System->>System: Check if embeddings file exists
    System->>System: If file exists, load embeddings and set the fitted attribute to True
    System->>System: If file doesn't exist, generate embeddings, fit the recommender, save embeddings to file and set fitted attribute to True
    System->>System: Perform Semantic Search and return Top 5 Chunks with KNN
    System->>System: Load Open AI prompt
    System->>System: Embed Top 5 Chunks in Open AI Prompt
    System->>System: Generate Answer with Davinci

    System-->>User: Return Answer
```

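The "Top 5 Chunks with KNN" step in the diagram can be exercised on its own. A rough sketch using the `SemanticSearch` class from `app.py` (the chunks are invented; loading the Universal Sentence Encoder from TF Hub downloads the model on first use):

```python
# Toy retrieval demo; SemanticSearch is defined in app.py in this commit.
search = SemanticSearch()
search.fit([
    '[1] "The boiling point of water at sea level is 100 C."',
    '[2] "Gradient descent updates weights along the negative gradient."',
    '[3] "The Eiffel Tower was completed in 1889."',
])  # n_neighbors is capped at len(data), i.e. 3 here
top_chunks = search("When was the Eiffel Tower built?")
print(top_chunks[0])  # nearest chunk; ideally the [3] entry
```

In the app itself, `load_recommender` caches the fitted embeddings to `<pdf name>_<start page>.npy`, so asking further questions about the same document skips the embedding step.
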
### Flowchart
```mermaid
flowchart TB
A[Input] --> B[URL]
A -- Upload File manually --> C[Parse PDF]
B --> D[Parse PDF] -- Preprocess --> E[Dynamic Text Chunks]
C -- Preprocess --> E[Dynamic Text Chunks with citation history]
E -- Fit --> F[Generate text embedding with Deep Averaging Network Encoder on each chunk]
F -- Query --> G[Get Top Results]
G -- K-Nearest Neighbour --> K[Get Nearest Neighbour - matching citation references]
K -- Generate Prompt --> H[Generate Answer]
H -- Output --> I[Output]
```

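The same flow can also be driven from a script rather than the Gradio UI. A rough end-to-end sketch, assuming `download_pdf`, `load_recommender` and `generate_answer` from `app.py` are available in the session and that you have a valid OpenAI key (the URL and key below are placeholders):

```python
# Illustrative only; the functions come from app.py in this commit.
download_pdf("https://example.com/paper.pdf", "corpus.pdf")
print(load_recommender("corpus.pdf"))   # "Corpus Loaded." or "Embeddings loaded from file"
answer = generate_answer("Which dataset was used for evaluation?", "sk-...")
print(answer)                           # short answer with [page] citations
```
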
app.py
ADDED
@@ -0,0 +1,238 @@
import urllib.request
import fitz  # PyMuPDF
import re
import numpy as np
import tensorflow_hub as hub
import openai
import gradio as gr
import os
from sklearn.neighbors import NearestNeighbors


def download_pdf(url, output_path):
    urllib.request.urlretrieve(url, output_path)


def preprocess(text):
    text = text.replace('\n', ' ')
    text = re.sub(r'\s+', ' ', text)
    return text


def pdf_to_text(path, start_page=1, end_page=None):
    doc = fitz.open(path)
    total_pages = doc.page_count

    if end_page is None:
        end_page = total_pages

    text_list = []

    for i in range(start_page-1, end_page):
        text = doc.load_page(i).get_text("text")
        text = preprocess(text)
        text_list.append(text)

    doc.close()
    return text_list


def text_to_chunks(texts, word_length=150, start_page=1):
    text_toks = [t.split(' ') for t in texts]
    page_nums = []
    chunks = []

    for idx, words in enumerate(text_toks):
        for i in range(0, len(words), word_length):
            chunk = words[i:i+word_length]
            # If the trailing chunk of a page is short, prepend it to the next
            # page's words instead of emitting a tiny chunk.
            if (i+word_length) > len(words) and (len(chunk) < word_length) and (
                len(text_toks) != (idx+1)):
                text_toks[idx+1] = chunk + text_toks[idx+1]
                continue
            chunk = ' '.join(chunk).strip()
            chunk = f'[{idx+start_page}]' + ' ' + '"' + chunk + '"'
            chunks.append(chunk)
    return chunks

class SemanticSearch:

    def __init__(self):
        # Deep Averaging Network encoder (Universal Sentence Encoder) from TF Hub.
        self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
        self.fitted = False

    def fit(self, data, batch=1000, n_neighbors=5):
        self.data = data
        self.embeddings = self.get_text_embedding(data, batch=batch)
        n_neighbors = min(n_neighbors, len(self.embeddings))
        self.nn = NearestNeighbors(n_neighbors=n_neighbors)
        self.nn.fit(self.embeddings)
        self.fitted = True

    def __call__(self, text, return_data=True):
        inp_emb = self.use([text])
        neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]

        if return_data:
            return [self.data[i] for i in neighbors]
        else:
            return neighbors

    def get_text_embedding(self, texts, batch=1000):
        embeddings = []
        for i in range(0, len(texts), batch):
            text_batch = texts[i:(i+batch)]
            emb_batch = self.use(text_batch)
            embeddings.append(emb_batch)
        embeddings = np.vstack(embeddings)
        return embeddings


#def load_recommender(path, start_page=1):
#    global recommender
#    texts = pdf_to_text(path, start_page=start_page)
#    chunks = text_to_chunks(texts, start_page=start_page)
#    recommender.fit(chunks)
#    return 'Corpus Loaded.'

# The modified function names the embeddings file after the PDF file name and start page,
# and checks whether that file already exists before generating embeddings again.
def load_recommender(path, start_page=1):
    global recommender
    pdf_file = os.path.basename(path)
    embeddings_file = f"{pdf_file}_{start_page}.npy"

    # The chunk list is always needed so neighbour indices can be mapped back to text.
    texts = pdf_to_text(path, start_page=start_page)
    chunks = text_to_chunks(texts, start_page=start_page)

    if os.path.isfile(embeddings_file):
        # Reuse cached embeddings, but rebuild the nearest-neighbour index and data list;
        # without them the recommender cannot answer queries after a cache hit.
        recommender.data = chunks
        recommender.embeddings = np.load(embeddings_file)
        recommender.nn = NearestNeighbors(n_neighbors=min(5, len(recommender.embeddings)))
        recommender.nn.fit(recommender.embeddings)
        recommender.fitted = True
        return "Embeddings loaded from file"

    recommender.fit(chunks)
    np.save(embeddings_file, recommender.embeddings)
    return 'Corpus Loaded.'


def generate_text(openAI_key, prompt, engine="text-davinci-003"):
    openai.api_key = openAI_key
    completions = openai.Completion.create(
        engine=engine,
        prompt=prompt,
        max_tokens=512,
        n=1,
        stop=None,
        temperature=0.7,
    )
    message = completions.choices[0].text
    return message


def generate_answer(question, openAI_key):
    topn_chunks = recommender(question)
    prompt = ""
    prompt += 'search results:\n\n'
    for c in topn_chunks:
        prompt += c + '\n\n'

    prompt += "Instructions: Compose a comprehensive reply to the query using the search results given. "\
              "Cite each reference using [number] notation (every result has this number at the beginning). "\
              "Citation should be done at the end of each sentence. If the search results mention multiple subjects "\
              "with the same name, create separate answers for each. Only include information found in the results and "\
              "don't add any additional information. Make sure the answer is correct and don't output false content. "\
              "If the text does not relate to the query, simply state 'Found Nothing'. Ignore outlier "\
              "search results which have nothing to do with the question. Only answer what is asked. The "\
              "answer should be short and concise.\n\n"

    prompt += f"Query: {question}\nAnswer:"
    answer = generate_text(openAI_key, prompt, "text-davinci-003")
    return answer


def question_answer(url, file, question, openAI_key):
    if openAI_key.strip() == '':
        return '[ERROR]: Please enter your OpenAI key. Get your key here: https://platform.openai.com/account/api-keys'
    if url.strip() == '' and file is None:
        return '[ERROR]: Both URL and PDF are empty. Provide at least one.'

    if url.strip() != '' and file is not None:
        return '[ERROR]: Both URL and PDF are provided. Please provide only one (either URL or PDF).'

    if url.strip() != '':
        glob_url = url
        download_pdf(glob_url, 'corpus.pdf')
        load_recommender('corpus.pdf')

    else:
        old_file_name = file.name
        file_name = file.name
        # Strip the temporary suffix Gradio appends to uploaded file names.
        file_name = file_name[:-12] + file_name[-4:]
        os.rename(old_file_name, file_name)
        load_recommender(file_name)

    if question.strip() == '':
        return '[ERROR]: Question field is empty'

    return generate_answer(question, openAI_key)


recommender = SemanticSearch()

title = 'PDF GPT'
description = """ What is PDF GPT ?
1. The problem is that OpenAI has a 4K token limit and cannot take an entire PDF file as input. Additionally, it sometimes returns irrelevant responses due to poor embeddings. ChatGPT cannot directly talk to external data. The solution is PDF GPT, which allows you to chat with an uploaded PDF file using GPT functionalities. The application breaks the document into smaller chunks and generates embeddings using a powerful Deep Averaging Network Encoder. A semantic search is performed on your query, and the top relevant chunks are used to generate a response.
2. The returned response can even cite the page number in square brackets ([]) where the information is located, adding credibility to the responses and helping to locate pertinent information quickly. The responses are much better than naive OpenAI responses."""

with gr.Blocks() as demo:

    gr.Markdown(f'<center><h1>{title}</h1></center>')
    gr.Markdown(description)

    with gr.Row():

        with gr.Group():
            gr.Markdown(f'<p style="text-align:center">Get your Open AI API key <a href="https://platform.openai.com/account/api-keys">here</a></p>')
            openAI_key = gr.Textbox(label='Enter your OpenAI API key here')
            url = gr.Textbox(label='Enter PDF URL here')
            gr.Markdown("<center><h4>OR<h4></center>")
            file = gr.File(label='Upload your PDF/ Research Paper / Book here', file_types=['.pdf'])
            question = gr.Textbox(label='Enter your question here')
            btn = gr.Button(value='Submit')
            btn.style(full_width=True)

        with gr.Group():
            answer = gr.Textbox(label='The answer to your question is :')

    btn.click(question_answer, inputs=[url, file, question, openAI_key], outputs=[answer])
#openai.api_key = os.getenv('Your_Key_Here')
demo.launch()


# import streamlit as st

# #Define the app layout
# st.markdown(f'<center><h1>{title}</h1></center>', unsafe_allow_html=True)
# st.markdown(description)

# col1, col2 = st.columns(2)

# # Define the inputs in the first column
# with col1:
#     url = st.text_input('URL')
#     st.markdown("<center><h6>or<h6></center>", unsafe_allow_html=True)
#     file = st.file_uploader('PDF', type='pdf')
#     question = st.text_input('question')
#     btn = st.button('Submit')

# # Define the output in the second column
# with col2:
#     answer = st.text_input('answer')

# # Define the button action
# if btn:
#     answer_value = question_answer(url, file, question)
#     answer.value = answer_value
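For reference, this is roughly the prompt that `generate_answer` above assembles and sends to `text-davinci-003` for a toy query (chunk text invented, instructions abbreviated):

```python
# Sketch of the assembled prompt; not executed by the app, shown only to make the
# citation mechanism concrete.
example_prompt = (
    "search results:\n\n"
    '[3] "The Eiffel Tower was completed in 1889."\n\n'
    "Instructions: Compose a comprehensive reply to the query using the search results given. "
    "Cite each reference using [number] notation (every result has this number at the beginning). "
    "... The answer should be short and concise.\n\n"
    "Query: When was the Eiffel Tower built?\nAnswer:"
)
# A typical completion: "The Eiffel Tower was completed in 1889 [3]."
```
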
requirements.txt
ADDED
@@ -0,0 +1,7 @@
PyMuPDF
numpy==1.19.5
scikit-learn
tensorflow>=2.0.0
tensorflow-hub
openai==0.10.2