import os

from dotenv import load_dotenv
import gradio as gr

from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_community.retrievers import PineconeHybridSearchRetriever
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_groq import ChatGroq
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from pinecone import Pinecone
from pinecone_text.sparse import BM25Encoder
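# Both GROQ_API_KEY and PINECONE_API_KEY must be defined in .env; the
# os.environ assignments below raise a TypeError if either is missing.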
# Load environment variables
load_dotenv(".env")
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
# Set environment variables
os.environ["GROQ_API_KEY"] = GROQ_API_KEY
os.environ["PINECONE_API_KEY"] = PINECONE_API_KEY
os.environ["TOKENIZERS_PARALLELISM"] = 'true'
# Initialize Pinecone index and BM25 encoder
pc = Pinecone(api_key=PINECONE_API_KEY)
pinecone_index = pc.Index("uae-national-library-and-archives-vectorstore")
bm25 = BM25Encoder().load("./UAE-NLA.json")
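# BM25Encoder.load() restores sparse-encoding parameters previously fitted on
# the library corpus and saved to UAE-NLA.json.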
# Initialize the dense embedding model and hybrid retriever; trust_remote_code
# is required because gte-multilingual-base ships custom modeling code.
embed_model = HuggingFaceEmbeddings(
    model_name="Alibaba-NLP/gte-multilingual-base",
    model_kwargs={"trust_remote_code": True},
)
retriever = PineconeHybridSearchRetriever(
embeddings=embed_model,
sparse_encoder=bm25,
index=pinecone_index,
top_k=50,
alpha=0.5
)
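# top_k controls how many documents are fetched per query; alpha weights the
# hybrid score (alpha=1.0 is purely dense/semantic, alpha=0.0 purely sparse/keyword).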
# Initialize LLM
llm = ChatGroq(model="llama-3.1-70b-versatile", temperature=0, max_tokens=1024, max_retries=2)
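# temperature=0 keeps answers deterministic; max_retries=2 retries transient API failures.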
# Contextualization prompt and retriever
contextualize_q_system_prompt = """Given a chat history and the latest user question \
which might reference context in the chat history, formulate a standalone question \
which can be understood without the chat history. Do NOT answer the question, \
just reformulate it if needed and otherwise return it as is.
"""
contextualize_q_prompt = ChatPromptTemplate.from_messages(
[
("system", contextualize_q_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}")
]
)
history_aware_retriever = create_history_aware_retriever(llm, retriever, contextualize_q_prompt)
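# The history-aware retriever first has the LLM rewrite the incoming question
# into a standalone query, then runs that query through the hybrid retriever.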
# QA system prompt and chain
qa_system_prompt = """You are a highly skilled information retrieval assistant. Use the following context to answer questions effectively.
If you don't know the answer, state that you don't know.
Your answer should be in {language} language.
Provide answers in proper HTML format and keep them concise.

When responding to queries, follow these guidelines:

1. Provide Clear Answers:
   - If the question is asked in Arabic, answer in Arabic; if it is asked in English, answer in English.
   - Ensure the response directly addresses the query with accurate and relevant information.

2. Include Detailed References:
   - Links to Sources: Include URLs to credible sources where users can verify information or explore further.
   - Reference Sites: Mention specific websites or platforms that offer additional information.
   - Downloadable Materials: Provide links to any relevant downloadable resources, if applicable.

3. Formatting for Readability:
   - The answer should be in proper HTML format with appropriate tags.
   - For Arabic responses, align the text to the right and convert numbers to Eastern Arabic numerals.
   - Double-check that the answer is in the correct language.
   - Use bullet points or numbered lists where applicable to present information clearly.
   - Highlight key details using bold or italics.
   - Use proper, meaningful link text for URLs; do not include naked URLs.

4. Organize Content Logically:
   - Structure the content in a logical order so it is easy to navigate and understand.

It is very important that you follow these guidelines.

{context}
"""
qa_prompt = ChatPromptTemplate.from_messages(
[
("system", qa_system_prompt),
MessagesPlaceholder("chat_history"),
("human", "{input}")
]
)
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
# Retrieval and Generative (RAG) Chain
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
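# rag_chain.invoke(...) returns a dict that includes the retrieved "context"
# documents and the generated "answer" alongside the original input keys.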
# Chat message history storage
store = {}
def clean_temporary_data():
    """Drop all stored chat histories (e.g. between demo sessions)."""
    store.clear()

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """Return the message history for a session, creating it on first use."""
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]
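# Note: histories live in a process-local dict and are lost on restart; a
# persistent backend (e.g. RedisChatMessageHistory) would be needed in production.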
# Conversational RAG chain with message history. Extra input keys such as
# "language" are passed through to the prompt unchanged, so only the
# input/history/output message keys need to be declared here.
conversational_rag_chain = RunnableWithMessageHistory(
    rag_chain,
    get_session_history,
    input_messages_key="input",
    history_messages_key="chat_history",
    output_messages_key="answer",
)
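# Example invocation (a sketch; "demo-session" is an arbitrary session id):
#   result = conversational_rag_chain.invoke(
#       {"input": "When was the archive established?", "language": "en"},
#       config={"configurable": {"session_id": "demo-session"}},
#   )
#   print(result["answer"])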
def remote_response(message, chat_history):
    """Answer a single message without streaming (not wired into the UI below)."""
    language = "en"
    response = conversational_rag_chain.invoke(
        {"input": message, "language": language},
        config={"configurable": {"session_id": "abc123"}},
    )
    return response["answer"]
# Gradio interface
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(show_label=False, placeholder="Type your message here...")
    clear = gr.Button("Clear")

    def respond(message, chat_history):
        bot_message = ""
        language = "en"
        # Show the user's message immediately, then stream the answer into place.
        chat_history.append((message, bot_message))
        for response_chunk in conversational_rag_chain.stream(
            {"input": message, "language": language},
            config={"configurable": {"session_id": "abc123"}},
        ):
            # Stream chunks carry different keys ("context", "answer", ...),
            # so accumulate only the answer tokens.
            bot_message += response_chunk.get("answer", "")
            chat_history[-1] = (message, bot_message)
            yield chat_history

    msg.submit(respond, [msg, chatbot], chatbot)
    clear.click(lambda: None, None, chatbot, queue=False)

demo.queue()  # Enable the queue so streaming responses work
demo.launch()