import os

import gradio as gr
from dotenv import load_dotenv
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_community.retrievers import PineconeHybridSearchRetriever
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_groq import ChatGroq
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from pinecone import Pinecone
from pinecone_text.sparse import BM25Encoder


# Load environment variables
load_dotenv(".env")
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
if not GROQ_API_KEY or not PINECONE_API_KEY:
    raise RuntimeError("GROQ_API_KEY and PINECONE_API_KEY must be set in .env")

# Set environment variables for downstream clients
os.environ["GROQ_API_KEY"] = GROQ_API_KEY
os.environ["PINECONE_API_KEY"] = PINECONE_API_KEY
os.environ["TOKENIZERS_PARALLELISM"] = "true"

# Initialize Pinecone index and BM25 encoder (the sparse side of the hybrid search)
pc = Pinecone(api_key=PINECONE_API_KEY)
pinecone_index = pc.Index("uae-national-library-and-archives-vectorstore")
bm25 = BM25Encoder().load("./UAE-NLA.json")
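# "UAE-NLA.json" holds BM25 statistics fitted offline on the corpus, roughly:
#   encoder = BM25Encoder()
#   encoder.fit(corpus_texts)  # corpus_texts: list[str] of documents (hypothetical name)
#   encoder.dump("./UAE-NLA.json")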

# Initialize the dense embedding model and the hybrid retriever
embed_model = HuggingFaceEmbeddings(
    model_name="Alibaba-NLP/gte-multilingual-base",
    model_kwargs={"trust_remote_code": True},
)
retriever = PineconeHybridSearchRetriever(
    embeddings=embed_model,
    sparse_encoder=bm25,
    index=pinecone_index,
    top_k=50,   # number of documents returned per query
    alpha=0.5,  # 1.0 = pure dense (semantic), 0.0 = pure sparse (BM25)
)
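# Hybrid retrieval effectively ranks each document by a convex combination of
# the two similarities: score = alpha * dense_score + (1 - alpha) * sparse_score.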

# Initialize LLM
llm = ChatGroq(model="llama-3.1-70b-versatile", temperature=0, max_tokens=1024, max_retries=2)

# Contextualization prompt and retriever
contextualize_q_system_prompt = """Given a chat history and the latest user question \
which might reference context in the chat history, formulate a standalone question \
which can be understood without the chat history. Do NOT answer the question, \
just reformulate it if needed and otherwise return it as is.
"""
contextualize_q_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_q_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}")
    ]
)
history_aware_retriever = create_history_aware_retriever(llm, retriever, contextualize_q_prompt)
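# Illustrative behavior: given the history "Tell me about the National Archives"
# and the follow-up "When was it founded?", the chain first rewrites the query
# into a standalone question ("When was the National Archives founded?") and
# only then runs retrieval.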

# QA system prompt and chain
qa_system_prompt = """You are a highly skilled information retrieval assistant. Use the following context to answer questions effectively.
If you don't know the answer, state that you don't know.
Your answer should be in the {language} language.
Provide answers in proper HTML format and keep them concise.
When responding to queries, follow these guidelines:
    1. Provide Clear Answers:
        - If the question is asked in Arabic, answer in Arabic; if it is asked in English, answer in English.
        - Ensure the response directly addresses the query with accurate and relevant information.
    2. Include Detailed References:
        - Links to Sources: Include URLs to credible sources where users can verify information or explore further.
        - Reference Sites: Mention specific websites or platforms that offer additional information.
        - Downloadable Materials: Provide links to any relevant downloadable resources if applicable.
    3. Formatting for Readability:
        - The answer should be in proper HTML format with appropriate tags.
        - For Arabic responses, align the text to the right and convert numbers to Arabic-Indic numerals.
        - Double-check that the answer is written in the correct language.
        - Use bullet points or numbered lists where applicable to present information clearly.
        - Highlight key details using bold or italics.
        - Use proper, meaningful link text for URLs. Do not include naked URLs.
    4. Organize Content Logically:
        - Structure the content in a logical order, ensuring easy navigation and understanding for the user.

It is very important to follow these guidelines.

{context}
"""
qa_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", qa_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}")
    ]
)
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
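# The "stuff" strategy inserts all retrieved documents into the prompt's
# {context} placeholder in a single LLM call (no map-reduce or refine steps).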

# Retrieval-Augmented Generation (RAG) chain
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
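# The chain's output is a dict containing the original "input", the retrieved
# "context" documents, and the generated "answer".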

# Chat message history storage (in-memory; lost on restart)
store = {}

def clean_temporary_data():
    """Drop all stored chat histories."""
    store.clear()

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """Return the message history for a session, creating it on first use."""
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]

# Conversational RAG chain with message history. Extra input keys such as
# "language" are passed through to the prompt automatically; the original
# "language_message_key" argument is not a RunnableWithMessageHistory parameter
# and has been removed.
conversational_rag_chain = RunnableWithMessageHistory(
    rag_chain,
    get_session_history,
    input_messages_key="input",
    history_messages_key="chat_history",
    output_messages_key="answer",
)
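# Example one-shot call outside the UI (session id and question are illustrative):
#   result = conversational_rag_chain.invoke(
#       {"input": "What services does the National Library offer?", "language": "en"},
#       config={"configurable": {"session_id": "demo"}},
#   )
#   print(result["answer"])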

def remote_response(message, chat_history):
    """Non-streaming variant: return the complete answer for one message."""
    language = "en"
    response = conversational_rag_chain.invoke(
        {"input": message, "language": language},
        config={"configurable": {"session_id": "abc123"}},
    )
    return response["answer"]

# Gradio interface
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(show_label=False, placeholder="Type your message here...")
    clear = gr.Button("Clear")

    def respond(message, chat_history):
        bot_message = ""
        language = "en"
        chat_history.append((message, bot_message))
        for response_chunk in conversational_rag_chain.stream(
            {"input": message, "language": language},
            config={"configurable": {"session_id": "abc123"}},
        ):
            # Stream chunks carry different keys ("input", "context", "answer");
            # accumulate only the answer tokens and update the last history entry.
            bot_message += response_chunk.get("answer", "")
            chat_history[-1] = (message, bot_message)
            yield chat_history

    msg.submit(respond, [msg, chatbot], chatbot)
    clear.click(lambda: None, None, chatbot, queue=False)

demo.queue()  # Enable queue for streaming
demo.launch()