Nikhil-Murade committed
Commit: befaea8
Parent(s): ed217fe

Upload 3 files

- ingestion.py +70 -0
- main.py +138 -0
- requirements.txt +123 -0
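Taken together, the three files form a small RAG pipeline: ingestion.py parses PDFs with LlamaParse, embeds them with OpenAI, and stores them in Qdrant; main.py is a Streamlit chat interface over that index; requirements.txt pins the dependencies.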
ingestion.py
ADDED
@@ -0,0 +1,70 @@
import os

import nest_asyncio

nest_asyncio.apply()

# Bring in our LLAMA_CLOUD_API_KEY (and the other keys) from .env
from dotenv import load_dotenv

load_dotenv()

##### LlamaParse #####
from llama_parse import LlamaParse
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
from llama_index.llms.openai import OpenAI

##### Qdrant #####
import qdrant_client
from qdrant_client import QdrantClient, models

llamaparse_api_key = os.getenv("LLAMA_CLOUD_API_KEY")

# Set up the parser: LlamaParse extracts plain text from PDFs
parser = LlamaParse(api_key=llamaparse_api_key, result_type="text")

# Use SimpleDirectoryReader to load every PDF in ./documents through LlamaParse
file_extractor = {".pdf": parser}
documents = SimpleDirectoryReader(
    input_dir="./documents", file_extractor=file_extractor
).load_data()

qdrant_url = os.getenv("QDRANT_URL")
qdrant_api_key = os.getenv("QDRANT_API_KEY")

# Embedding model and LLM are configured globally via Settings
embed_model = OpenAIEmbedding(model="text-embedding-3-large")
Settings.embed_model = embed_model

openai_api_key = os.getenv("OPENAI_API_KEY")
llm = OpenAI(model="gpt-3.5-turbo", api_key=openai_api_key)
Settings.llm = llm

client = qdrant_client.QdrantClient(
    api_key=qdrant_api_key,
    url=qdrant_url,
)

### Creating a new collection on Qdrant (not needed) ###
# client.create_collection(
#     collection_name="RAG_test",
#     vectors_config=models.VectorParams(size=1536, distance=models.Distance.COSINE),
# )

# Embed the parsed documents and push them into the Qdrant collection
vector_store = QdrantVectorStore(client=client, collection_name="RAG_Test")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents=documents, storage_context=storage_context, show_progress=True
)

index.storage_context.persist()
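One thing to watch in the script above: the commented-out create_collection call pins size=1536, while text-embedding-3-large produces 3072-dimensional vectors by default, and it targets "RAG_test" rather than the "RAG_Test" name actually used by QdrantVectorStore (which normally creates the collection itself on first insert). If you do want to pre-create the collection, a minimal standalone sketch consistent with the script might look like this; it is hypothetical and not part of the commit:

# Hypothetical standalone sketch, not part of the committed files.
# Pre-creates the Qdrant collection with dimensions matching
# text-embedding-3-large (3072 by default) rather than 1536.
import os

from dotenv import load_dotenv
from qdrant_client import QdrantClient, models

load_dotenv()

client = QdrantClient(
    url=os.getenv("QDRANT_URL"),
    api_key=os.getenv("QDRANT_API_KEY"),
)
client.create_collection(
    collection_name="RAG_Test",  # must match the name used by QdrantVectorStore
    vectors_config=models.VectorParams(size=3072, distance=models.Distance.COSINE),
)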
main.py
ADDED
@@ -0,0 +1,138 @@
import os

import nest_asyncio

nest_asyncio.apply()

# Bring in our API keys from .env
from dotenv import load_dotenv

load_dotenv()

# UI
import streamlit as st

from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
from llama_index.core.postprocessor import SentenceEmbeddingOptimizer
from llama_index.llms.openai import OpenAI

##### Qdrant #####
import qdrant_client


@st.cache_resource(show_spinner=False)
def get_index() -> VectorStoreIndex:
    # Embedding model and LLM are configured globally via Settings
    embed_model = OpenAIEmbedding(model="text-embedding-3-large")
    Settings.embed_model = embed_model

    openai_api_key = os.getenv("OPENAI_API_KEY")
    llm = OpenAI(model="gpt-3.5-turbo", api_key=openai_api_key)
    Settings.llm = llm

    qdrant_url = os.getenv("QDRANT_URL")
    qdrant_api_key = os.getenv("QDRANT_API_KEY")

    client = qdrant_client.QdrantClient(
        api_key=qdrant_api_key,
        url=qdrant_url,
    )
    # NOTE: ingestion.py writes to the "RAG_Test" collection, while the app
    # reads from "RAG_FINAL"; make sure the intended collection name matches.
    vector_store = QdrantVectorStore(client=client, collection_name="RAG_FINAL")
    storage_context = StorageContext.from_defaults(vector_store=vector_store)

    return VectorStoreIndex.from_vector_store(
        vector_store,
        storage_context=storage_context,
        embed_model=embed_model,
    )


index = get_index()
if "chat_engine" not in st.session_state.keys():
    # postprocessor = SentenceEmbeddingOptimizer(
    #     percentile_cutoff=0.5, threshold_cutoff=0.7
    # )

    st.session_state.chat_engine = index.as_chat_engine(
        chat_mode="context",
        verbose=True,
        # system_prompt=("""You are an AI assistant for the Brize learning platform chat interface.
        # Brize, a continuous learning platform, leverages the GROW career coaching framework to guide employee growth at every career stage.
        # Follow these instructions to provide the best user experience:
        # * Relevance check:
        #   Ensure the user's questions are relevant to data, retrieval, or specific topics related to
        #   1. Strategic Presence Momentum
        #   2. Managing Others
        #   3. Leading Others
        #   4. Brize-related information
        #   (don't show the above list in your response)
        #   If a question is not relevant, respond with: "Please ask relevant questions."
        # * Clarity and conciseness:
        #   Provide clear and concise answers.
        #   Avoid lengthy responses unless the complexity of the question necessitates a detailed explanation.
        # * Specificity:
        #   Encourage users to be specific in their queries to provide the most accurate answers.
        #   If a question is too broad or vague, or when in doubt, ask the user for more details to provide the best possible assistance.
        # * Sensitive information:
        #   Remind users not to share sensitive personal data or proprietary information.
        #   Inform them that the system is designed to provide assistance and information, not to handle confidential data.
        # * Guidelines:
        #   Always prioritize clarity and usefulness in your responses.
        #   Maintain a professional, helpful, and kind tone.
        #   Be succinct unless a detailed response is necessary."""),
        # node_postprocessors=[postprocessor],
    )


st.set_page_config(
    page_title="Chat with Brize",
    page_icon=":nonstop:",
    layout="centered",
    initial_sidebar_state="auto",
    menu_items=None,
)


st.title("Chat with Brize 💬📚")

if "messages" not in st.session_state.keys():
    st.session_state.messages = [
        {
            "role": "assistant",
            "content": "Ask me a question about Brize Courses",
        }
    ]

if prompt := st.chat_input("Your question"):
    st.session_state.messages.append({"role": "user", "content": prompt})

# Replay the conversation so far
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# Generate a reply whenever the last message is from the user
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = st.session_state.chat_engine.chat(message=prompt)
            st.write(response.response)
            nodes = [node for node in response.source_nodes]
            # Only the first retrieved source node is displayed (note the break)
            for col, node, i in zip(st.columns(len(nodes)), nodes, range(len(nodes))):
                with col:
                    st.header(f"Source Node {i + 1}: score = {node.score}")
                    # st.write(node.text)
                    st.subheader(f"File Path: {node.metadata['file_name']}")
                    st.write(node.metadata)
                    st.header("Source:")
                    st.write(node.get_content()[:1000] + "...")
                break
            message = {"role": "assistant", "content": response.response}
            st.session_state.messages.append(message)
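The app is started with `streamlit run main.py` and reads the same environment variables as the ingestion script (OPENAI_API_KEY, QDRANT_URL, and QDRANT_API_KEY; LLAMA_CLOUD_API_KEY is only needed by ingestion.py). A small pre-flight check along these lines, hypothetical and not part of the commit, can fail fast when a key is missing:

# Hypothetical pre-flight check, not part of the committed files.
# Verifies the environment variables read by ingestion.py and main.py
# before launching the app with `streamlit run main.py`.
import os

from dotenv import load_dotenv

load_dotenv()

REQUIRED = ["OPENAI_API_KEY", "QDRANT_URL", "QDRANT_API_KEY", "LLAMA_CLOUD_API_KEY"]
missing = [name for name in REQUIRED if not os.getenv(name)]
if missing:
    raise SystemExit(f"Missing environment variables: {', '.join(missing)}")
print("All required environment variables are set.")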
requirements.txt
ADDED
@@ -0,0 +1,123 @@
aiohttp==3.9.5
aiosignal==1.3.1
altair==5.3.0
annotated-types==0.7.0
anyio==4.4.0
async-timeout==4.0.3
attrs==23.2.0
beautifulsoup4==4.12.3
black==24.4.2
blinker==1.8.2
cachetools==5.3.3
certifi==2024.6.2
charset-normalizer==3.3.2
click==8.1.7
dataclasses-json==0.6.6
Deprecated==1.2.14
dirtyjson==1.0.8
distro==1.9.0
entrypoints==0.4
exceptiongroup==1.2.1
frozenlist==1.4.1
fsspec==2024.6.0
gitdb==4.0.11
GitPython==3.1.43
greenlet==3.0.3
grpcio==1.64.1
grpcio-tools==1.64.1
h11==0.14.0
h2==4.1.0
hpack==4.0.0
httpcore==1.0.5
httpx==0.27.0
hyperframe==6.0.1
idna==3.7
importlib_metadata==7.1.0
Jinja2==3.1.4
joblib==1.4.2
jsonschema==4.22.0
jsonschema-specifications==2023.12.1
llama-index==0.10.43
llama-index-agent-openai==0.2.7
llama-index-cli==0.1.12
llama-index-core==0.10.43
llama-index-embeddings-openai==0.1.10
llama-index-indices-managed-llama-cloud==0.1.6
llama-index-legacy==0.9.48
llama-index-llms-openai==0.1.22
llama-index-multi-modal-llms-openai==0.1.6
llama-index-program-openai==0.1.6
llama-index-question-gen-openai==0.1.3
llama-index-readers-file==0.1.23
llama-index-readers-llama-parse==0.1.4
llama-index-vector-stores-qdrant==0.2.8
llama-parse==0.4.4
llamaindex-py-client==0.1.19
loguru==0.7.2
markdown-it-py==3.0.0
MarkupSafe==2.1.5
marshmallow==3.21.2
mdurl==0.1.2
multidict==6.0.5
mypy-extensions==1.0.0
nest-asyncio==1.6.0
networkx==3.3
nltk==3.8.1
numpy==1.26.4
openai==1.31.0
packaging==24.0
pandas==2.2.2
pathspec==0.12.1
pillow==10.3.0
platformdirs==4.2.2
portalocker==2.8.2
protobuf==3.20.3
pyarrow==16.1.0
pydantic==2.7.3
pydantic_core==2.18.4
pydeck==0.9.1
Pygments==2.18.0
Pympler==1.0.1
pypdf==4.2.0
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
pytz==2024.1
PyYAML==6.0.1
qdrant-client==1.9.1
referencing==0.35.1
regex==2024.5.15
requests==2.32.3
rich==13.7.1
rpds-py==0.18.1
scikit-learn==1.0.2
scipy==1.13.1
semver==3.0.2
shellingham==1.5.4
six==1.16.0
smmap==5.0.1
sniffio==1.3.1
soupsieve==2.5
SQLAlchemy==2.0.30
streamlit==1.35.0
striprtf==0.0.26
tenacity==8.3.0
threadpoolctl==3.5.0
tiktoken==0.7.0
toml==0.10.2
tomli==2.0.1
toolz==0.12.1
tornado==6.4
tqdm==4.66.4
trubrics==1.3.6
typeguard==2.13.3
typer==0.12.3
typing-inspect==0.9.0
typing_extensions==4.12.1
tzdata==2024.1
tzlocal==5.2
urllib3==2.2.1
validators==0.28.3
watchdog==4.0.1
wrapt==1.16.0
yarl==1.9.4
zipp==3.19.2
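The list is fully pinned, so `pip install -r requirements.txt` into a fresh virtual environment should reproduce the build; the presence of the exceptiongroup and async-timeout backports suggests a target interpreter older than Python 3.11, though that is an inference rather than anything stated in the commit.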