import os

import nest_asyncio

nest_asyncio.apply()

# bring in API keys and connection settings from .env
from dotenv import load_dotenv

load_dotenv()
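# Variables the script reads from the environment (placeholder values):
#   OPENAI_API_KEY=sk-...
#   QDRANT_URL=https://<your-cluster>.qdrant.io:6333
#   QDRANT_API_KEY=...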
# UI
import streamlit as st

from llama_index.core import Settings, StorageContext, VectorStoreIndex
from llama_index.core.postprocessor import SentenceEmbeddingOptimizer
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.vector_stores.qdrant import QdrantVectorStore

##### Qdrant #######
import qdrant_client
# Configure the page before any other Streamlit call renders.
st.set_page_config(
    page_title="Chat with Brize, powered by LlamaIndex",
    page_icon=":nonstop:",
    layout="centered",
    initial_sidebar_state="auto",
    menu_items=None,
)
@st.cache_resource(show_spinner=False)
def get_index() -> VectorStoreIndex:
    """Connect to the existing Qdrant collection and wrap it in a query-ready index."""
    embed_model = OpenAIEmbedding(model="text-embedding-3-large")
    Settings.embed_model = embed_model

    openai_api_key = os.getenv("OPENAI_API_KEY")
    llm = OpenAI(model="gpt-3.5-turbo", api_key=openai_api_key)
    Settings.llm = llm

    qdrant_url = os.getenv("QDRANT_URL")
    qdrant_api_key = os.getenv("QDRANT_API_KEY")
    client = qdrant_client.QdrantClient(
        api_key=qdrant_api_key,
        url=qdrant_url,
    )
    # The "RAG_FINAL" collection is assumed to be populated by a separate ingestion step.
    vector_store = QdrantVectorStore(client=client, collection_name="RAG_FINAL")
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    return VectorStoreIndex.from_vector_store(
        vector_store,
        storage_context=storage_context,
        embed_model=embed_model,
    )


index = get_index()
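# Build the chat engine once per session. chat_mode="context" retrieves the
# top-matching nodes from the index for every user message and injects them
# into the system prompt as context before the LLM answers. The optimizer
# postprocessor and custom system prompt below are left disabled; uncomment
# them to enable.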
if "chat_engine" not in st.session_state.keys(): | |
# postprocessor = SentenceEmbeddingOptimizer( | |
# percentile_cutoff=0.5, threshold_cutoff=0.7 | |
# ) | |
st.session_state.chat_engine = index.as_chat_engine( | |
chat_mode="context", | |
verbose=True | |
# system_prompt = ("""You are an AI assistant for the Brize learning platform chat interface. | |
# Brize, a continuous learning platform, leverages the GROW career coaching framework to guide employee growth at every career stage. | |
# Follow these instructions to provide the best user experience: | |
# * Relevance Check: | |
# Ensure the user's questions are relevant to data, retrieval, or specific topics related to | |
# 1 Strategic Presence Momentum, | |
# 2 Managing Others | |
# 3 Leading Others | |
# 4 Brize Related Information | |
# (don't show the above list in your response) | |
# If a question is not relevant, respond with: "Please ask relevant questions." | |
# * Clarity and Conciseness: | |
# Provide clear and concise answers. | |
# Avoid lengthy responses unless the complexity of the question necessitates a detailed explanation. | |
# * Specificity: | |
# Encourage users to be specific in their queries to provide the most accurate answers. | |
# If a question is too broad or vague or When in doubt, ask the user for more details to provide the best possible assistance. | |
# * Sensitive Information: | |
# Remind users not to share sensitive personal data or proprietary information. | |
# Inform them that the system is designed to provide assistance and information, not to handle confidential data. | |
# * Guidelines: | |
# Always prioritize clarity and usefulness in your responses. | |
# Maintain a professional, helpful and Kind tone. | |
# Be succinct unless a detailed response is necessary.""") | |
# node_postprocessors=[postprocessor] | |
) | |
st.title("Chat with Brize 💬🦙")
if "messages" not in st.session_state.keys(): | |
st.session_state.messages = [ | |
{ | |
"role": "assistant", | |
"content": "Ask me a question about Brize Courses", | |
} | |
] | |
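# Streamlit reruns this script top-to-bottom on every interaction: record any
# new user message, re-render the full chat history, and only call the LLM
# when the last message is from the user.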
if prompt := st.chat_input("Your question"):
    st.session_state.messages.append({"role": "user", "content": prompt})

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = st.session_state.chat_engine.chat(message=prompt)
            st.write(response.response)
            nodes = response.source_nodes
            if nodes:
                for i, (col, node) in enumerate(zip(st.columns(len(nodes)), nodes)):
                    with col:
                        st.header(f"Source Node {i + 1}: score = {node.score}")
                        # st.write(node.text)
                        st.subheader(f"File Path: {node.metadata.get('file_name', 'unknown')}")
                        st.write(node.metadata)
                        st.header("Source:")
                        st.write(node.get_content()[:1000] + "...")
                    break  # only the top-ranked source node is shown
            message = {"role": "assistant", "content": response.response}
            st.session_state.messages.append(message)