import os

from llama_index.core import (
    ChatPromptTemplate,
    Settings,
    SimpleDirectoryReader,
    VectorStoreIndex,
)
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.llms.azure_openai import AzureOpenAI


def create_models():
    """Build the Azure OpenAI chat and embedding models from environment variables."""
    llm = AzureOpenAI(
        deployment_name=os.environ.get("AZURE_OPENAI_DEPLOYMENT_ID_RAG"),
        api_key=os.environ.get("OPENAI_API_KEY_RAG"),
        azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT_RAG"),
        api_version="2024-02-01",
    )
    embed_model = AzureOpenAIEmbedding(
        deployment_name="personality_rag_embedding",
        api_key=os.environ.get("OPENAI_API_KEY_RAG"),
        azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT_RAG"),
        api_version="2024-02-01",
    )
    return llm, embed_model
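
# Quick smoke test (a sketch; assumes OPENAI_API_KEY_RAG,
# AZURE_OPENAI_ENDPOINT_RAG, and AZURE_OPENAI_DEPLOYMENT_ID_RAG point at live
# Azure deployments):
#
#   llm, embed_model = create_models()
#   print(llm.complete("Say hello."))                    # chat model responds
#   print(len(embed_model.get_text_embedding("hello")))  # embedding dimension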


def configure_settings(llm, embed_model):
    """Register the models and chunking parameters on llama_index's global Settings."""
    Settings.llm = llm
    Settings.embed_model = embed_model
    # Documents are split into 2048-token chunks with a 50-token overlap at index time.
    Settings.chunk_size = 2048
    Settings.chunk_overlap = 50
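
# Note: Settings is a process-wide singleton in llama_index.core, so these
# values apply to every index and query engine created afterwards in this
# process, not only the index built below.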


def load_documents_and_create_index():
    """Load every file under rag_data/ and embed it into an in-memory vector index."""
    documents = SimpleDirectoryReader(input_dir="rag_data/").load_data()
    return VectorStoreIndex.from_documents(documents)
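
# The index above is rebuilt (and re-embedded) on every call. A sketch of the
# usual llama_index persistence pattern, assuming a local ./storage directory:
#
#   from llama_index.core import StorageContext, load_index_from_storage
#
#   index.storage_context.persist(persist_dir="storage")           # after building
#   storage_context = StorageContext.from_defaults(persist_dir="storage")
#   index = load_index_from_storage(storage_context)               # on later runs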


def create_chat_prompt_template(profile=None):
    # {{profile}} is double-braced so ChatPromptTemplate does not treat it as a
    # template variable; it is substituted manually below.
    text_qa_template_str = (
        "You are a knowledgeable personality coach providing insights based on "
        "the specific personality analysis provided below."
        "\n---------------------\n{{profile}}\n---------------------\n"
        "Answer questions about yourself (chatbot) and personality analysis based "
        "on the technical manual about yourself (chatbot) and personality analysis below."
        "\n---------------------\n{context_str}\n---------------------\n"
        "Your responses should be around 100 words, directly relate to the user's "
        "question, and draw on relevant details from the analysis. "
        "Do not answer irrelevant questions. If the user's question does not pertain "
        "to the personality analysis and yourself (chatbot), or is beyond the scope "
        "of the information provided, politely decline to answer, stating that the "
        "question is outside the analysis context. "
        "Focus on delivering concise, accurate, insightful, and relevant information.\n"
        "Question: {query_str}"
    )

    if profile:
        text_qa_template_str = text_qa_template_str.replace("{{profile}}", profile)

    chat_text_qa_msgs = [
        (
            "system",
            'Your name is "Personality Coach". You are an expert in career advice and '
            "a personality consultant from the company Meta Profiling. Do not infer or "
            "assume information beyond what's explicitly provided in the conversation.",
        ),
        ("user", text_qa_template_str),
    ]
    return ChatPromptTemplate.from_messages(chat_text_qa_msgs)
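
# Example of what the template renders to (a sketch; the profile and question
# strings here are made up for illustration):
#
#   template = create_chat_prompt_template(profile="ENTJ: decisive, strategic...")
#   messages = template.format_messages(
#       context_str="(retrieved manual excerpts)",
#       query_str="What are my strengths?",
#   )
#   # messages[0] is the system message, messages[1] the filled-in user message.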


def execute_query(index, template, query):
    # Retrieve the two most similar chunks and answer with the custom QA template.
    query_engine = index.as_query_engine(similarity_top_k=2, text_qa_template=template)
    return query_engine.query(query)
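
# query() returns a Response object rather than a plain string: str(answer)
# (or answer.response) gives the generated text, and answer.source_nodes lists
# the retrieved chunks with their similarity scores.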


def invoke(question, profile):
    """Answer a question about the given personality profile via the RAG pipeline."""
    if profile is None:
        return "Profile is missing"

    llm, embed_model = create_models()
    configure_settings(llm, embed_model)
    index = load_documents_and_create_index()
    chat_prompt_template = create_chat_prompt_template(profile)
    return execute_query(index, chat_prompt_template, question)
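

if __name__ == "__main__":
    # Minimal usage sketch: assumes the Azure environment variables are set and
    # rag_data/ contains the chatbot/personality manual. The profile string is
    # a made-up placeholder, not real Meta Profiling output.
    sample_profile = "Openness: high. Conscientiousness: moderate. Extraversion: low."
    print(invoke("What kind of work environment suits me?", sample_profile))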