from omegaconf import OmegaConf
import streamlit as st
import os
from PIL import Image
import re
import sys
import datetime
from dotenv import load_dotenv
from pydantic import Field, BaseModel

from vectara_agent.agent import Agent, AgentStatusType
from vectara_agent.tools import ToolsFactory
from vectara_agent.tools_catalog import rephrase_text

teaching_styles = ['Inquiry-based', 'Socratic', 'traditional']
languages = {'English': 'en', 'Spanish': 'es', 'French': 'fr', 'German': 'de', 'Arabic': 'ar', 'Chinese': 'zh-cn',
             'Hebrew': 'he', 'Hindi': 'hi', 'Italian': 'it', 'Japanese': 'ja', 'Korean': 'ko', 'Portuguese': 'pt'}
initial_prompt = "How can I help you today?"

load_dotenv(override=True)


def create_tools(cfg):

    def adjust_response_to_student(
        text: str = Field(description='the original text.'),
        age: int = Field(description='the age of the student. An integer'),
        style: str = Field(description='teaching style'),
        language: str = Field(description='the language')
    ) -> str:
        """
        Rephrase the text to match the student's age, desired teaching style and language.
        """
        # age, style and language are provided as tool arguments when the agent calls this tool.
        instructions = f'''
        The following is a response the teacher is planning to provide to a student, based on their question.
        Please adjust the response to match the student's age of {age} and the {style} teaching style.
        For example, in the inquiry-based teaching style, ask questions that encourage the student to think
        critically instead of responding directly with the answer. In the Socratic teaching style, ask
        questions that lead the student to the answer.
        Always respond in the {language} language.
        '''
        return rephrase_text(text, instructions)

    class JusticeHarvardArgs(BaseModel):
        query: str = Field(..., description="The user query.")

    tools_factory = ToolsFactory(vectara_api_key=cfg.api_key,
                                 vectara_customer_id=cfg.customer_id,
                                 vectara_corpus_id=cfg.corpus_id)
    query_tool = tools_factory.create_rag_tool(
        tool_name="justice_harvard_query",
        tool_description="""
        Answer questions about justice, morality, politics and related topics,
        based on transcripts of recordings from the Justice Harvard class, which includes a lot of content on these topics.
        When using the tool it's best to ask simple, short questions. You can break complex questions into sub-queries.
        """,
        tool_args_schema=JusticeHarvardArgs,
        reranker="multilingual_reranker_v1", rerank_k=100,
        n_sentences_before=2, n_sentences_after=2, lambda_val=0.005,
        summary_num_results=10,
        vectara_summarizer='vectara-summary-ext-24-05-med-omni',
        include_citations=True,
    )

    return (tools_factory.get_tools(
                [
                    adjust_response_to_student,
                ]
            ) +
            tools_factory.standard_tools() +
            tools_factory.guardrail_tools() +
            [query_tool]
    )


def initialize_agent(_cfg):
    bot_instructions = f"""
    - You are a helpful teacher assistant, with expertise in education in various teaching styles.
    - Obtain information using tools to answer the user's query.
    - If the tool cannot provide information relevant to the user's query, tell the user that you are unable to provide an answer.
    - If the tool can provide relevant information, use the adjust_response_to_student tool to rephrase the response
      so that it fits the student's age of {_cfg.student_age}, the {_cfg.style} teaching style and the {_cfg.language} language.
    - Respond in a concise and clear manner, and provide the most relevant information to the student.
    - Never discuss politics, and always respond politely.
""" def update_func(status_type: AgentStatusType, msg: str): if status_type != AgentStatusType.AGENT_UPDATE: output = f"{status_type.value} - {msg}" st.session_state.log_messages.append(output) agent = Agent( tools=create_tools(_cfg), topic="justice, morality, politics, and philosophy", custom_instructions=bot_instructions, update_func=update_func ) return agent def toggle_logs(): st.session_state.show_logs = not st.session_state.show_logs def launch_bot(): def reset(): cfg = st.session_state.cfg st.session_state.messages = [{"role": "assistant", "content": initial_prompt, "avatar": "🦖"}] st.session_state.thinking_message = "Agent at work..." st.session_state.agent = initialize_agent(cfg) st.session_state.log_messages = [] st.session_state.prompt = None st.session_state.show_logs = False st.set_page_config(page_title="Justice Harvard Teaching Assistant", layout="wide") if 'cfg' not in st.session_state: cfg = OmegaConf.create({ 'customer_id': str(os.environ['VECTARA_CUSTOMER_ID']), 'corpus_id': str(os.environ['VECTARA_CORPUS_ID']), 'api_key': str(os.environ['VECTARA_API_KEY']), 'style': teaching_styles[0], 'language': 'English', 'student_age': 18 }) st.session_state.cfg = cfg st.session_state.style = cfg.style st.session_state.language = cfg.language st.session_state.student_age = cfg.student_age reset() cfg = st.session_state.cfg # left side content with st.sidebar: image = Image.open('Vectara-logo.png') st.image(image, width=250) st.markdown("## Welcome to the Justice Harvard e-learning assistant demo.\n\n\n") st.markdown("\n") cfg.style = st.selectbox('Teacher Style:', teaching_styles) if st.session_state.style != cfg.style: st.session_state.style = cfg.style reset() st.markdown("\n") cfg.language = st.selectbox('Language:', languages.keys()) if st.session_state.language != cfg.language: st.session_state.langage = cfg.language reset() st.markdown("\n") cfg.student_age = st.number_input( 'Student age:', min_value=13, max_value=99, value=cfg.student_age, step=1, format='%i' ) if st.session_state.student_age != cfg.student_age: st.session_state.student_age = cfg.student_age reset() st.markdown("\n\n") bc1, _ = st.columns([1, 1]) with bc1: if st.button('Start Over'): reset() st.markdown("---") st.markdown( "## How this works?\n" "This app was built with [Vectara](https://vectara.com).\n\n" "It demonstrates the use of Agentic Chat functionality with Vectara" ) st.markdown("---") if "messages" not in st.session_state.keys(): reset() # Display chat messages for message in st.session_state.messages: with st.chat_message(message["role"], avatar=message["avatar"]): st.write(message["content"]) # User-provided prompt if prompt := st.chat_input(): st.session_state.messages.append({"role": "user", "content": prompt, "avatar": '🧑‍💻'}) st.session_state.prompt = prompt # Save the prompt in session state st.session_state.log_messages = [] st.session_state.show_logs = False with st.chat_message("user", avatar='🧑‍💻'): print(f"Starting new question: {prompt}\n") st.write(prompt) # Generate a new response if last message is not from assistant if st.session_state.prompt: with st.chat_message("assistant", avatar='🤖'): with st.spinner(st.session_state.thinking_message): res = st.session_state.agent.chat(st.session_state.prompt) res = res.replace('$', '\\$') # escape dollar sign for markdown message = {"role": "assistant", "content": res, "avatar": '🤖'} st.session_state.messages.append(message) st.markdown(res) st.session_state.prompt = None log_placeholder = st.empty() with log_placeholder.container(): if 
st.session_state.show_logs: st.button("Hide Logs", on_click=toggle_logs) for msg in st.session_state.log_messages: st.text(msg) else: if len(st.session_state.log_messages) > 0: st.button("Show Logs", on_click=toggle_logs) sys.stdout.flush() if __name__ == "__main__": launch_bot()
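# Usage sketch (the file name app.py is an assumption; adjust to however this script is saved):
#   1. Provide VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID and VECTARA_API_KEY in the environment
#      or in a local .env file (picked up above by load_dotenv).
#   2. Launch the app with the standard Streamlit CLI:  streamlit run app.py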