import logging
import os
import sys

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings, ChatPromptTemplate
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.llms.azure_openai import AzureOpenAI

# Create LLM and Embedding models
def create_models():
    llm = AzureOpenAI(
        deployment_name="personality_gpt4o",
        api_key=os.environ.get("AZURE_OPENAI_KEY"),
        azure_endpoint="https://personalityanalysisfinetuning.openai.azure.com/",
        api_version="2024-02-01",
    )
    embed_model = AzureOpenAIEmbedding(
        deployment_name="personality_rag_embedding",
        api_key=os.environ.get("AZURE_OPENAI_KEY"),
        azure_endpoint="https://personalityanalysisfinetuning.openai.azure.com/",
        api_version="2024-02-01",
    )
    return llm, embed_model

# Configure settings
def configure_settings(llm, embed_model):
    Settings.llm = llm
    Settings.embed_model = embed_model
    Settings.chunk_size = 2048
    Settings.chunk_overlap = 50

# Load documents and create index
def load_documents_and_create_index():
    documents = SimpleDirectoryReader(input_dir="rag_data/").load_data()
    return VectorStoreIndex.from_documents(documents)

# Create chat prompt template
def create_chat_prompt_template(profile=None):
    text_qa_template_str = (
        "You are a knowledgeable personality coach providing insights based on the specific personality analysis provided below."
        "\n---------------------\n{{profile}}\n---------------------\n"
        "Answer questions about yourself (the chatbot) and the personality analysis using the technical manual below."
        "\n---------------------\n{context_str}\n---------------------\n"
        "Your responses should be around 100 words, directly relate to the user's question, and draw on relevant details from the analysis. "
        "Do not answer irrelevant questions. If the user's question does not pertain to the personality analysis or to yourself (the chatbot), or is beyond the scope of the information provided, "
        "politely decline to answer, stating that the question is outside the analysis context. "
        "Focus on delivering concise, accurate, insightful, and relevant information. "
        "Question: {query_str}")
    if profile:
        # Substitute the literal {{profile}} placeholder with the provided personality profile text
        text_qa_template_str = text_qa_template_str.replace("{{profile}}", profile)
    print(text_qa_template_str)
    chat_text_qa_msgs = [
        ("system",
         "Your name is \"Personality Coach\". You are an expert in career advice and a personality consultant from "
         "the company Meta Profiling. Do not infer or assume information beyond what's explicitly provided in the conversation."
         # "Avoid drawing on external knowledge or making generalizations not directly supported by "
         # "the report content. Do not answer irrelevant questions."
         ),
        ("user", text_qa_template_str),
    ]
    return ChatPromptTemplate.from_messages(chat_text_qa_msgs)

# Execute query
def execute_query(index, template, query):
    query_engine = index.as_query_engine(similarity_top_k=2, text_qa_template=template)
    answer = query_engine.query(query)
    # print(answer.get_formatted_sources())
    return answer

def invoke(question, profile):
    if profile is None:
        return "Profile is missing"
    # setup_environment()
    # setup_logging()
    llm, embed_model = create_models()
    configure_settings(llm, embed_model)
    index = load_documents_and_create_index()
    chat_prompt_template = create_chat_prompt_template(profile)
    return execute_query(index, chat_prompt_template, question)
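
# Example usage: a minimal sketch for local testing. The sample question and
# profile text below are hypothetical; it assumes AZURE_OPENAI_KEY is set in the
# environment and that rag_data/ contains the technical manual documents.
if __name__ == "__main__":
    sample_profile = "Openness: high. Conscientiousness: moderate. Extraversion: low."
    sample_question = "What kind of work environment would suit me best?"
    print(invoke(sample_question, sample_profile))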