from omegaconf import OmegaConf
import streamlit as st
import os
from PIL import Image
import re
from translate import Translator
from pydantic import Field
import sys
from llama_index.indices.managed.vectara import VectaraIndex
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.utils import print_text
from llama_index.core.agent.react.formatter import ReActChatFormatter
from llama_index.core.tools import FunctionTool
from prompts import prompt_template
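
# Teaching styles and UI languages offered in the sidebar; the language codes are the
# short codes expected by the `translate` package.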
learning_styles = ['traditional', 'Inquiry-based', 'Socratic']
languages = {'English': 'en', 'Spanish': 'es', 'French': 'fr', 'German': 'de', 'Arabic': 'ar', 'Chinese': 'zh-cn',
             'Hebrew': 'he', 'Hindi': 'hi', 'Italian': 'it', 'Japanese': 'ja', 'Korean': 'ko', 'Portuguese': 'pt'}

initial_prompt = "How can I help you today?"
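

# Main entry point: builds the ReAct agent (Vectara query tool + rephrasing tool)
# and renders the Streamlit chat UI.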
def launch_bot():
    def reset():
        # Rebuild the translated prompts, the Vectara index, the tools, and the agent for the current settings.
        cfg = st.session_state.cfg
        llm = OpenAI(model="gpt-4o", temperature=0)
        tr_prompt = Translator(to_lang=languages[cfg.language]).translate(initial_prompt)
        st.session_state.messages = [{"role": "assistant", "content": tr_prompt, "avatar": "🦙"}]
        st.session_state.thinking_prompt = Translator(to_lang=languages[cfg.language]).translate("Thinking...")
        vectara = VectaraIndex(vectara_api_key=cfg.api_key,
                               vectara_customer_id=cfg.customer_id,
                               vectara_corpus_id=cfg.corpus_id)

        # Create tool to adapt output to style, age and language
        def adjust_response_to_student(
            text: str = Field(description='the original text'),
            age: int = Field(description='the age of the student. An integer'),
            style: str = Field(description='teaching style'),
            language: str = Field(description='the language')
        ) -> str:
            """
            Rephrase the text to match the student's age, desired teaching style and language.
            """
            llm = OpenAI(model="gpt-4o", temperature=0)
            print(f"DEBUG: Adjusting response to student age {age}, style {style} and language {language}")
            prompt = f'''
            The following is the response the teacher is planning to provide to a student based on their question.
            Please adjust the response to match the student's age of {age} and the {style} teaching style.
            For example, in the inquiry-based teaching style, choose to ask questions that encourage the student to think critically instead of responding directly with the answer.
            Or in the Socratic teaching style, choose to ask questions that lead the student to the answer.
            Always respond in the {language} language.
            original response: {text}
            adjusted response:
            '''
            response = llm.complete(prompt)
            return str(response)  # return plain text, as declared by the -> str annotation
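
        # The query engine below uses Vectara summarization with MMR retrieval; the parameter
        # values (10 summary results, rerank over 50 matches, mild diversity bias, 5 sentences
        # of context on each side) are simply the values this demo ships with.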
        # Create the Vectara Tool
        vectara_tool = QueryEngineTool(
            query_engine=vectara.as_query_engine(summary_enabled=True, summary_num_results=10,
                                                 summary_response_lang=languages[cfg.language],
                                                 summary_prompt_name="vectara-summary-ext-24-05-large",
                                                 vectara_query_mode="mmr", rerank_k=50, mmr_diversity_bias=0.1,
                                                 n_sentence_before=5, n_sentence_after=5),
            metadata=ToolMetadata(name="vectara",
                                  description="""
                                  A tool that is able to answer questions about justice, morality, politics and related topics.
                                  Based on transcripts of recordings from the Justice Harvard class, which includes a lot of content on these topics.
                                  When using the tool it's best to ask simple, short questions.
                                  """),
        )
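
        # Expose the rephrasing function as a tool the agent can call after retrieving an answer.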
        rephrase_tool = FunctionTool.from_defaults(adjust_response_to_student)

        # Create the agent
        prompt = prompt_template.replace("{style}", cfg.style) \
                                .replace("{language}", cfg.language) \
                                .replace("{student_age}", str(cfg.student_age))
        st.session_state.agent = ReActAgent.from_tools(
            tools=[vectara_tool, rephrase_tool], llm=llm,
            verbose=True,
            react_chat_formatter=ReActChatFormatter(system_header=prompt)
        )
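
    # First run in a session: pull Vectara credentials from environment variables,
    # set default style/language/age, and build the initial agent.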
    if 'cfg' not in st.session_state:
        cfg = OmegaConf.create({
            'customer_id': str(os.environ['VECTARA_CUSTOMER_ID']),
            'corpus_id': str(os.environ['VECTARA_CORPUS_ID']),
            'api_key': str(os.environ['VECTARA_API_KEY']),
            'style': learning_styles[0],
            'language': 'English',
            'student_age': 18
        })
        st.session_state.cfg = cfg
        st.session_state.style = cfg.style
        st.session_state.language = cfg.language
        st.session_state.student_age = cfg.student_age
        reset()

    cfg = st.session_state.cfg
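
    # Page setup and sidebar controls: changing the learning style, language, or student age
    # rebuilds the agent via reset().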
    st.set_page_config(page_title="Teaching Assistant", layout="wide")

    # left side content
    with st.sidebar:
        image = Image.open('Vectara-logo.png')
        st.image(image, width=250)
        st.markdown("## Welcome to the Justice Harvard e-learning assistant demo.\n\n\n")
        st.markdown("\n")

        cfg.style = st.selectbox('Learning Style:', learning_styles)
        if st.session_state.style != cfg.style:
            st.session_state.style = cfg.style
            reset()

        st.markdown("\n")
        cfg.language = st.selectbox('Language:', languages.keys())
        if st.session_state.language != cfg.language:
            st.session_state.language = cfg.language
            reset()

        st.markdown("\n")
        cfg.student_age = st.number_input(
            'Student age:', min_value=13, value=cfg.student_age,
            step=1, format='%i'
        )
        if st.session_state.student_age != cfg.student_age:
            st.session_state.student_age = cfg.student_age
            reset()

        st.markdown("\n\n")
        if st.button('Start Over'):
            reset()

        st.markdown("---")
        st.markdown(
            "## How this works\n"
            "This app was built with [Vectara](https://vectara.com).\n\n"
            "It demonstrates the use of Agentic Chat functionality with Vectara."
        )
        st.markdown("---")
if "messages" not in st.session_state.keys(): | |
reset() | |
# Display chat messages | |
for message in st.session_state.messages: | |
with st.chat_message(message["role"], avatar=message["avatar"]): | |
st.write(message["content"]) | |
    # User-provided prompt
    if prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": '🧑‍💻'})
        with st.chat_message("user", avatar='🧑‍💻'):
            print_text(f"Starting new question: {prompt}\n", color='green')
            st.write(prompt)

    # Generate a new response if last message is not from assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant", avatar='🤖'):
            with st.spinner(st.session_state.thinking_prompt):
                res = st.session_state.agent.chat(prompt)
                cleaned = re.sub(r'\[\d+\]', '', res.response)
                st.write(cleaned)
                message = {"role": "assistant", "content": cleaned, "avatar": '🤖'}
                st.session_state.messages.append(message)
        sys.stdout.flush()


if __name__ == "__main__":
    launch_bot()