Upload 11 files
- .gitattributes +1 -0
- README.md +32 -12
- app.py +144 -0
- arxiv.py +55 -0
- chain.py +128 -0
- ingest.py +99 -0
- ingest.sh +6 -0
- ingest_examples.py +219 -0
- ingest_faiss.py +41 -0
- paper +3 -0
- qa.py +26 -0
- requirements.txt +10 -0
.gitattributes
CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+paper filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,12 +1,32 @@
# PaperChat

This repo is an implementation of a chatbot specifically focused on question answering over the [LangChain documentation](https://langchain.readthedocs.io/en/latest/).

## 🚀 Important Links

Website: [chat.langchain.dev](https://chat.langchain.dev)

Hugging Face Space: [huggingface.co/spaces/hwchase17/chat-langchain](https://huggingface.co/spaces/hwchase17/chat-langchain)

Blog Post: [blog.langchain.dev/langchain-chat/](https://blog.langchain.dev/langchain-chat/)

## 📚 Technical description

There are two components: ingestion and question-answering.

Ingestion has the following steps (a minimal sketch follows the list):

1. Pull HTML from the documentation site
2. Parse HTML with BeautifulSoup
3. Split documents with LangChain's [TextSplitter](https://langchain.readthedocs.io/en/latest/modules/utils/combine_docs_examples/textsplitter.html)
4. Create a vectorstore of embeddings, using LangChain's [vectorstore wrapper](https://langchain.readthedocs.io/en/latest/modules/utils/combine_docs_examples/vectorstores.html) (with OpenAI's embeddings and Weaviate's vectorstore)
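For the FAISS variant this Space actually uses, steps 3–4 condense to roughly the following (a minimal sketch of what `ingest_faiss.py` does; the chunk sizes and the per-chunk `source` labels here are illustrative, and `OPENAI_API_KEY` is assumed to be set):

```python
import pickle

import faiss
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS


def ingest(text: str, out_dir: str) -> None:
    # Split the raw text into overlapping chunks that fit the LLM context.
    splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=200)
    chunks = splitter.split_text(text)
    # Embed the chunks (assumes OPENAI_API_KEY is set) and build the vectorstore.
    store = FAISS.from_texts(
        chunks,
        OpenAIEmbeddings(),
        metadatas=[{"source": f"chunk-{i}"} for i in range(len(chunks))],
    )
    # Persist: the raw faiss index is written separately so the rest
    # of the store can be pickled.
    faiss.write_index(store.index, f"{out_dir}/docs.index")
    store.index = None
    with open(f"{out_dir}/faiss_store.pkl", "wb") as f:
        pickle.dump(store, f)
```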
Question-Answering has the following steps (sketched below):

1. Given the chat history and new user input, determine what a standalone question would be (using GPT-3)
2. Given that standalone question, look up relevant documents from the vectorstore
3. Pass the standalone question and relevant documents to GPT-3 to generate a final answer
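Condensed, that loop (implemented in `CustomChain._call` in `chain.py`) looks roughly like this sketch; `condense_chain` and `qa_chain` are illustrative names for the question-rephrasing LLMChain and the document-combining chain:

```python
def answer_question(question, chat_history, vectorstore, condense_chain, qa_chain):
    if chat_history:
        # 1. Rewrite the follow-up into a standalone question using the LLM.
        question = condense_chain.run(question=question, chat_history=chat_history)
    # 2. Retrieve the most relevant chunks from the vectorstore.
    docs = vectorstore.similarity_search(question, k=4)
    # 3. Let the LLM answer from the retrieved context.
    answer, _ = qa_chain.combine_docs(docs, question=question, chat_history=chat_history)
    return answer
```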
## 🧠 How to Extend to your documentation

Coming soon.
app.py
ADDED
@@ -0,0 +1,144 @@
import datetime
import os
import pickle

import faiss
import gradio as gr
from langchain import OpenAI
from langchain.chains import VectorDBQAWithSourcesChain

from arxiv import get_paper
from ingest_faiss import create_vector_store


def get_vectorstore(suffix):
    """Load the pickled FAISS store for a paper directory and reattach its raw index."""
    index = faiss.read_index(f"{suffix}/docs.index")
    with open(f"{suffix}/faiss_store.pkl", "rb") as f:
        store = pickle.load(f)
    store.index = index
    return store


def set_openai_api_key(api_key, agent, suffix):
    # Currently unused (see the commented-out .change() handler at the bottom).
    # Takes the paper directory explicitly, since get_vectorstore requires it.
    if api_key:
        os.environ["OPENAI_API_KEY"] = api_key
        vectorstore = get_vectorstore(suffix)
        qa_chain = VectorDBQAWithSourcesChain.from_llm(llm=OpenAI(temperature=0), vectorstore=vectorstore)
        os.environ["OPENAI_API_KEY"] = ""
        return qa_chain


def download_paper_and_embed(paper_arxiv_url, api_key):
    if paper_arxiv_url and api_key:
        paper_text = get_paper(paper_arxiv_url)
        if "abs" in paper_arxiv_url:
            eprint_url = paper_arxiv_url.replace("https://arxiv.org/abs/", "https://arxiv.org/e-print/")
        elif "pdf" in paper_arxiv_url:
            eprint_url = paper_arxiv_url.replace("https://arxiv.org/pdf/", "https://arxiv.org/e-print/")
        else:
            raise ValueError("Invalid arXiv URL")
        suffix = "paper-dir/" + eprint_url.replace("https://arxiv.org/e-print/", "")
        # Only build the embeddings once per paper.
        if not os.path.exists(suffix + "/docs.index"):
            create_vector_store(suffix, paper_text)

        os.environ["OPENAI_API_KEY"] = api_key
        vectorstore = get_vectorstore(suffix)
        qa_chain = VectorDBQAWithSourcesChain.from_llm(llm=OpenAI(temperature=0), vectorstore=vectorstore)
        os.environ["OPENAI_API_KEY"] = ""
        return qa_chain


chain = None


def chat(inp, history, paper_arxiv_url, api_key, agent):
    global chain
    # First message in a session: download the paper and build the QA chain.
    if history is None:
        chain = download_paper_and_embed(paper_arxiv_url, api_key)
    history = history or []
    # if agent is None:
    #     history.append((inp, "Please paste your OpenAI key to use"))
    #     return history, history
    print("\n==== date/time: " + str(datetime.datetime.now()) + " ====")
    print("inp: " + inp)
    output = chain({"question": inp})
    answer = output["answer"]
    sources = output["sources"]
    history.append((inp, answer))
    history.append(("Sources?", sources))
    print(history)
    return history, history


block = gr.Blocks(css=".gradio-container {background-color: lightgray}")

with block:
    state = gr.State()
    agent_state = gr.State()
    with gr.Row():
        gr.Markdown("<h3><center>PaperChat</center></h3>")

    paper_arxiv_url = gr.Textbox(
        placeholder="Paste the URL of the paper about which you want to ask a question",
        show_label=False,
        lines=1,
        type="url",
    )

    openai_api_key_textbox = gr.Textbox(
        placeholder="Paste your OpenAI API key (sk-...)",
        show_label=False,
        lines=1,
        type="password",
    )

    # # button to download paper
    # download_paper_button = gr.Button(
    #     value="Download paper and make embeddings",
    #     variant="secondary",
    # ).click(
    #     download_paper_and_embed,
    #     inputs=[paper_arxiv_url, openai_api_key_textbox, agent_state],
    #     outputs=[agent_state],
    # )

    chatbot = gr.Chatbot()

    with gr.Row():
        message = gr.Textbox(
            label="What's your question?",
            placeholder="What's the answer to life, the universe, and everything?",
            lines=1,
        )
        submit = gr.Button(value="Send", variant="secondary").style(full_width=False)

    # gr.Examples(
    #     examples=[
    #         "What are agents?",
    #         "How do I summarize a long document?",
    #         "What types of memory exist?",
    #     ],
    #     inputs=message,
    # )

    gr.HTML(
        """This app demonstrates question-answering on any given arxiv paper"""
    )

    gr.HTML(
        "<center>Powered by <a href='https://github.com/hwchase17/langchain'>LangChain 🦜️🔗</a></center>"
    )

    submit.click(chat, inputs=[message, state, paper_arxiv_url, openai_api_key_textbox, agent_state], outputs=[chatbot, state])
    message.submit(chat, inputs=[message, state, paper_arxiv_url, openai_api_key_textbox, agent_state], outputs=[chatbot, state])

    # paper_arxiv_url.change(
    #     download_paper_and_embed,
    #     inputs=[paper_arxiv_url, agent_state],
    #     outputs=[agent_state],
    # )

    # openai_api_key_textbox.change(
    #     set_openai_api_key,
    #     inputs=[openai_api_key_textbox, agent_state],
    #     outputs=[agent_state],
    # )

block.launch(debug=True)
arxiv.py
ADDED
@@ -0,0 +1,55 @@
import os
from subprocess import call

import requests
from pylatexenc.latex2text import LatexNodes2Text


def get_paper(paper_url):
    """Download an arXiv e-print, extract its LaTeX source, and return it as plain text."""
    if "abs" in paper_url:
        eprint_url = paper_url.replace("https://arxiv.org/abs/", "https://arxiv.org/e-print/")
    elif "pdf" in paper_url:
        eprint_url = paper_url.replace("https://arxiv.org/pdf/", "https://arxiv.org/e-print/")
    else:
        raise ValueError("Invalid arXiv URL")

    suffix = "paper-dir/" + eprint_url.replace("https://arxiv.org/e-print/", "")

    # Check if the directory exists (i.e. the paper was already fetched and extracted).
    if os.path.exists(suffix):
        print("Paper already downloaded, skipping download")
    else:
        print("Downloading paper")
        r = requests.get(eprint_url)

        with open("paper", "wb") as f:
            f.write(r.content)

        # Unzip gzipped tar file to new directory
        # (-p so the parent paper-dir/ is created on first run).
        call(["mkdir", "-p", suffix])
        call(["tar", "-xzf", "paper", "-C", suffix])

    # Get the list of all .tex files in the directory.
    tex_files = [f for f in os.listdir(suffix) if f.endswith(".tex")]
    # Remove math_commands.tex from tex_files if it exists.
    if "math_commands.tex" in tex_files:
        tex_files.remove("math_commands.tex")
    if len(tex_files) == 1:
        # Read the main tex file.
        with open(f"{suffix}/{tex_files[0]}", "r") as f:
            paper_tex = f.read()
    elif len(tex_files) == 0:
        raise ValueError("No .tex files found in the paper")
    else:
        raise ValueError("More than one .tex file found in the paper")

    # Convert LaTeX to plain text.
    paper_text = LatexNodes2Text().latex_to_text(paper_tex)

    with open(f"{suffix}/main.txt", "w") as f:
        f.write(paper_text)

    return paper_text


if __name__ == "__main__":
    paper_url = "https://arxiv.org/abs/2206.08896"
    paper_text = get_paper(paper_url)
    print(paper_text)
chain.py
ADDED
@@ -0,0 +1,128 @@
import os
from typing import Dict, List, Tuple

import weaviate
from langchain import OpenAI
from langchain.chains import LLMChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
from langchain.prompts.example_selector import \
    SemanticSimilarityExampleSelector
from langchain.vectorstores import Weaviate
from pydantic import BaseModel


class CustomChain(Chain, BaseModel):
    """Condense the follow-up into a standalone question, retrieve, then answer."""

    vstore: Weaviate
    chain: BaseCombineDocumentsChain
    key_word_extractor: Chain

    @property
    def input_keys(self) -> List[str]:
        # chat_history is read in _call, so it must be declared as an input.
        return ["question", "chat_history"]

    @property
    def output_keys(self) -> List[str]:
        return ["answer"]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        question = inputs["question"]
        chat_history_str = _get_chat_history(inputs["chat_history"])
        if chat_history_str:
            new_question = self.key_word_extractor.run(
                question=question, chat_history=chat_history_str
            )
        else:
            new_question = question
        print(new_question)
        docs = self.vstore.similarity_search(new_question, k=4)
        new_inputs = inputs.copy()
        new_inputs["question"] = new_question
        new_inputs["chat_history"] = chat_history_str
        answer, _ = self.chain.combine_docs(docs, **new_inputs)
        return {"answer": answer}


def get_new_chain1(vectorstore) -> Chain:
    WEAVIATE_URL = os.environ["WEAVIATE_URL"]
    client = weaviate.Client(
        url=WEAVIATE_URL,
        additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]},
    )

    _eg_template = """## Example:

Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question: {answer}"""
    _eg_prompt = PromptTemplate(
        template=_eg_template,
        input_variables=["chat_history", "question", "answer"],
    )

    _prefix = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. You should assume that the question is related to LangChain."""
    _suffix = """## Example:

Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
    eg_store = Weaviate(
        client,
        "Rephrase",
        "content",
        attributes=["question", "answer", "chat_history"],
    )
    example_selector = SemanticSimilarityExampleSelector(vectorstore=eg_store, k=4)
    prompt = FewShotPromptTemplate(
        prefix=_prefix,
        suffix=_suffix,
        example_selector=example_selector,
        example_prompt=_eg_prompt,
        input_variables=["question", "chat_history"],
    )
    llm = OpenAI(temperature=0, model_name="text-davinci-003")
    key_word_extractor = LLMChain(llm=llm, prompt=prompt)

    EXAMPLE_PROMPT = PromptTemplate(
        template=">Example:\nContent:\n---------\n{page_content}\n----------\nSource: {source}",
        input_variables=["page_content", "source"],
    )
    template = """You are an AI assistant for the open source library LangChain. The documentation is located at https://langchain.readthedocs.io.
You are given the following extracted parts of a long document and a question. Provide a conversational answer with a hyperlink to the documentation.
You should only use hyperlinks that are explicitly listed as a source in the context. Do NOT make up a hyperlink that is not listed.
If the question includes a request for code, provide a code block directly from the documentation.
If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
If the question is not about LangChain, politely inform them that you are tuned to only answer questions about LangChain.
Question: {question}
=========
{context}
=========
Answer in Markdown:"""
    PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
    doc_chain = load_qa_chain(
        OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=-1),
        chain_type="stuff",
        prompt=PROMPT,
        document_prompt=EXAMPLE_PROMPT,
    )
    return CustomChain(
        chain=doc_chain, vstore=vectorstore, key_word_extractor=key_word_extractor
    )


def _get_chat_history(chat_history: List[Tuple[str, str]]):
    buffer = ""
    for human_s, ai_s in chat_history:
        human = "Human: " + human_s
        ai = "Assistant: " + ai_s
        buffer += "\n" + "\n".join([human, ai])
    return buffer
ingest.py
ADDED
@@ -0,0 +1,99 @@
"""Load text from the extracted paper, clean up, split, ingest into Weaviate."""
import os

import weaviate
from langchain.text_splitter import CharacterTextSplitter

# The HTML-scraping path from the original chat-langchain repo, kept for reference
# (it needs `from pathlib import Path` and `from bs4 import BeautifulSoup`):
# def clean_data(data):
#     soup = BeautifulSoup(data)
#     text = soup.find_all("main", {"id": "main-content"})[0].get_text()
#     return "\n".join([t for t in text.split("\n") if t])
#
# docs = []
# metadatas = []
# for p in Path("langchain.readthedocs.io/en/latest/").rglob("*"):
#     if p.is_dir():
#         continue
#     with open(p) as f:
#         docs.append(clean_data(f.read()))
#     metadatas.append({"source": p})

with open("paper-dir/main.txt") as f:
    paper_text = f.read()

# Split on the section symbol that pylatexenc emits for LaTeX \section commands.
docs = paper_text.split("§")
# The source metadata is the first word that comes after the section symbol
# (create_documents expects a list of dicts, and the batch loop below reads
# text.metadata["source"]).
metadatas = [{"source": doc.split(" ")[0]} for doc in docs]

text_splitter = CharacterTextSplitter(
    separator="\n",
    chunk_size=1000,
    chunk_overlap=200,
    length_function=len,
)

documents = text_splitter.create_documents(docs, metadatas=metadatas)


WEAVIATE_URL = os.environ["WEAVIATE_URL"]
client = weaviate.Client(
    url=WEAVIATE_URL,
    additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]},
)

client.schema.delete_class("Paragraph")
client.schema.get()
schema = {
    "classes": [
        {
            "class": "Paragraph",
            "description": "A written paragraph",
            "vectorizer": "text2vec-openai",
            "moduleConfig": {
                "text2vec-openai": {
                    "model": "ada",
                    "modelVersion": "002",
                    "type": "text",
                }
            },
            "properties": [
                {
                    "dataType": ["text"],
                    "description": "The content of the paragraph",
                    "moduleConfig": {
                        "text2vec-openai": {
                            "skip": False,
                            "vectorizePropertyName": False,
                        }
                    },
                    "name": "content",
                },
                {
                    "dataType": ["text"],
                    "description": "The link",
                    "moduleConfig": {
                        "text2vec-openai": {
                            "skip": True,
                            "vectorizePropertyName": False,
                        }
                    },
                    "name": "source",
                },
            ],
        },
    ]
}

client.schema.create(schema)

with client.batch as batch:
    for text in documents:
        batch.add_data_object(
            {
                "content": text.page_content,
                "source": str(text.metadata["source"]),
            },
            "Paragraph",
        )
ingest.sh
ADDED
@@ -0,0 +1,6 @@
# Bash script to ingest data
# This involves scraping the data from the web and then cleaning up and putting in Weaviate.
set -eu
wget -r -A.html https://langchain.readthedocs.io/en/latest/
python3 ingest.py
python3 ingest_examples.py
ingest_examples.py
ADDED
@@ -0,0 +1,219 @@
"""Ingest examples into Weaviate."""
import os

import weaviate
from langchain.prompts.example_selector.semantic_similarity import \
    sorted_values

WEAVIATE_URL = os.environ["WEAVIATE_URL"]
client = weaviate.Client(
    url=WEAVIATE_URL,
    additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]},
)

# Drop and recreate both example classes so ingestion is idempotent.
client.schema.delete_class("Rephrase")
client.schema.delete_class("QA")
client.schema.get()
schema = {
    "classes": [
        {
            "class": "Rephrase",
            "description": "Rephrase Examples",
            "vectorizer": "text2vec-openai",
            "moduleConfig": {
                "text2vec-openai": {
                    "model": "ada",
                    "modelVersion": "002",
                    "type": "text",
                }
            },
            "properties": [
                {
                    "dataType": ["text"],
                    "moduleConfig": {
                        "text2vec-openai": {
                            "skip": False,
                            "vectorizePropertyName": False,
                        }
                    },
                    "name": "content",
                },
                {
                    "dataType": ["text"],
                    "description": "The question",
                    "moduleConfig": {
                        "text2vec-openai": {
                            "skip": True,
                            "vectorizePropertyName": False,
                        }
                    },
                    "name": "question",
                },
                {
                    "dataType": ["text"],
                    "description": "The answer",
                    "moduleConfig": {
                        "text2vec-openai": {
                            "skip": True,
                            "vectorizePropertyName": False,
                        }
                    },
                    "name": "answer",
                },
                {
                    "dataType": ["text"],
                    "description": "The chat history",
                    "moduleConfig": {
                        "text2vec-openai": {
                            "skip": True,
                            "vectorizePropertyName": False,
                        }
                    },
                    "name": "chat_history",
                },
            ],
        },
    ]
}

client.schema.create(schema)

documents = [
    {
        "question": "how do i load those?",
        "chat_history": "Human: What types of memory exist?\nAssistant: \n\nThere are a few different types of memory: Buffer, Summary, and Conversational Memory.",
        "answer": "How do I load Buffer, Summary, and Conversational Memory",
    },
    {
        "question": "how do i install this package?",
        "chat_history": "",
        "answer": "How do I install langchain?",
    },
    {
        "question": "how do I set serpapi_api_key?",
        "chat_history": "Human: can you write me a code snippet for that?\nAssistant: \n\nYes, you can create an Agent with a custom LLMChain in LangChain. Here is a [link](https://langchain.readthedocs.io/en/latest/modules/agents/examples/custom_agent.html) to the documentation that provides a code snippet for creating a custom Agent.",
        "answer": "How do I set the serpapi_api_key?",
    },
    {
        "question": "What are some methods for data augmented generation?",
        "chat_history": "Human: List all methods of an Agent class please\nAssistant: \n\nTo answer your question, you can find a list of all the methods of the Agent class in the [API reference documentation](https://langchain.readthedocs.io/en/latest/modules/agents/reference.html).",
        "answer": "What are some methods for data augmented generation?",
    },
    {
        "question": "can you write me a code snippet for that?",
        "chat_history": "Human: how do I create an agent with custom LLMChain?\nAssistant: \n\nTo create an Agent with a custom LLMChain in LangChain, you can use the [Custom Agent example](https://langchain.readthedocs.io/en/latest/modules/agents/examples/custom_agent.html). This example shows how to create a custom LLMChain and use an existing Agent class to parse the output. For more information on Agents and Tools, check out the [Key Concepts](https://langchain.readthedocs.io/en/latest/modules/agents/key_concepts.html) documentation.",
        "answer": "Can you provide a code snippet for creating an Agent with a custom LLMChain?",
    },
]

# The vectorized "content" field is the example's values joined in sorted-key order.
for d in documents:
    d["content"] = " ".join(sorted_values(d))
with client.batch as batch:
    for text in documents:
        batch.add_data_object(
            text,
            "Rephrase",
        )

client.schema.get()
schema = {
    "classes": [
        {
            "class": "QA",
            "description": "QA Examples",
            "vectorizer": "text2vec-openai",
            "moduleConfig": {
                "text2vec-openai": {
                    "model": "ada",
                    "modelVersion": "002",
                    "type": "text",
                }
            },
            "properties": [
                {
                    "dataType": ["text"],
                    "moduleConfig": {
                        "text2vec-openai": {
                            "skip": False,
                            "vectorizePropertyName": False,
                        }
                    },
                    "name": "content",
                },
                {
                    "dataType": ["text"],
                    "description": "The question",
                    "moduleConfig": {
                        "text2vec-openai": {
                            "skip": True,
                            "vectorizePropertyName": False,
                        }
                    },
                    "name": "question",
                },
                {
                    "dataType": ["text"],
                    "description": "The answer",
                    "moduleConfig": {
                        "text2vec-openai": {
                            "skip": True,
                            "vectorizePropertyName": False,
                        }
                    },
                    "name": "answer",
                },
                {
                    "dataType": ["text"],
                    "description": "The summaries",
                    "moduleConfig": {
                        "text2vec-openai": {
                            "skip": True,
                            "vectorizePropertyName": False,
                        }
                    },
                    "name": "summaries",
                },
                {
                    "dataType": ["text"],
                    "description": "The sources",
                    "moduleConfig": {
                        "text2vec-openai": {
                            "skip": True,
                            "vectorizePropertyName": False,
                        }
                    },
                    "name": "sources",
                },
            ],
        },
    ]
}

client.schema.create(schema)

documents = [
    {
        "question": "how do i install langchain?",
        "answer": "```pip install langchain```",
        "summaries": ">Example:\nContent:\n---------\nYou can pip install langchain package by running 'pip install langchain'\n----------\nSource: foo.html",
        "sources": "foo.html",
    },
    {
        "question": "how do i import an openai LLM?",
        "answer": "```from langchain.llm import OpenAI```",
        "summaries": ">Example:\nContent:\n---------\nyou can import the open ai wrapper (OpenAI) from the langchain.llm module\n----------\nSource: bar.html",
        "sources": "bar.html",
    },
]

for d in documents:
    d["content"] = " ".join(sorted_values(d))
with client.batch as batch:
    for text in documents:
        batch.add_data_object(
            text,
            "QA",
        )
ingest_faiss.py
ADDED
@@ -0,0 +1,41 @@
import pickle

import faiss
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS


def create_vector_store(suffix, paper_text):
    # Normalize both section markers that pylatexenc emits ("§" for \section,
    # "§.§" for \subsection), then split the paper into sections.
    split_chars = ["§.§", "§"]
    for c in split_chars:
        paper_text = paper_text.replace(c, "§")
    data = paper_text.split("§")

    # The source metadata is the rest of the text on the same line as the section symbol.
    sources = []
    for d in data:
        sources.append(d.split("\n")[0].strip())

    sources[0] = "Beginning of paper"

    # Here we split the documents, as needed, into smaller chunks.
    # We do this due to the context limits of the LLMs.
    text_splitter = CharacterTextSplitter(chunk_size=1500, separator="\n")
    docs = []
    metadatas = []
    for i, d in enumerate(data):
        splits = text_splitter.split_text(d)
        docs.extend(splits)
        metadatas.extend([{"source": sources[i]}] * len(splits))

    # Here we create a vector store from the documents and save it to disk.
    store = FAISS.from_texts(docs, OpenAIEmbeddings(), metadatas=metadatas)
    faiss.write_index(store.index, f"{suffix}/docs.index")
    # Detach the raw index before pickling; it was saved separately above.
    store.index = None
    with open(f"{suffix}/faiss_store.pkl", "wb") as f:
        pickle.dump(store, f)
paper
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9aeb23e3ec5eb22876c9c3b823841a82706cabe5aeacbe1783686a1db8aa0285
size 10862082
qa.py
ADDED
@@ -0,0 +1,26 @@
"""Ask a question about the ingested paper from the command line."""
import argparse
import pickle

import faiss
from langchain import OpenAI
from langchain.chains import VectorDBQAWithSourcesChain

parser = argparse.ArgumentParser(description="Ask a question about the paper")
parser.add_argument("question", type=str, help="The question to ask about the paper")
args = parser.parse_args()

# Load the FAISS index and the pickled store, then reattach the index.
index = faiss.read_index("docs.index")

with open("faiss_store.pkl", "rb") as f:
    store = pickle.load(f)

store.index = index
chain = VectorDBQAWithSourcesChain.from_llm(llm=OpenAI(temperature=0), vectorstore=store)
result = chain({"question": args.question})
print(f"Answer: {result['answer']}")
sources = result["sources"].split(", ")
sources = [s.title() for s in sources]
print(f"Sources: {', '.join(sources)}")

# Drop into an interactive shell for inspecting `result` and `store`.
import code

code.interact(local=locals())
requirements.txt
ADDED
@@ -0,0 +1,10 @@
langchain==0.0.64
beautifulsoup4
weaviate-client
openai
faiss-cpu
black
isort
Flask
transformers
gradio
pylatexenc