Spaces:
Running
Running
File size: 7,863 Bytes
d5ed529 771e8d7 d5ed529 6eab5e8 d5ed529 76eb72e 771e8d7 d5ed529 76eb72e d5ed529 76eb72e d5ed529 76eb72e d5ed529 76eb72e 771e8d7 4e3842f 771e8d7 4e3842f 771e8d7 4e3842f 771e8d7 4e3842f 771e8d7 76eb72e d5ed529 76eb72e d5ed529 76eb72e 771e8d7 76eb72e d5ed529 771e8d7 76eb72e d5ed529 76eb72e d5ed529 76eb72e d5ed529 76eb72e d5ed529 76eb72e d5ed529 22ac629 d5ed529 22ac629 d5ed529 76eb72e d5ed529 22ac629 d5ed529 771e8d7 d5ed529 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 |
from omegaconf import OmegaConf
import streamlit as st
import os
from PIL import Image
import re
from translate import Translator
from pydantic import Field
import sys
from llama_index.indices.managed.vectara import VectaraIndex
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.utils import print_text
from llama_index.core.agent.react.formatter import ReActChatFormatter
from llama_index.core.tools import FunctionTool
from prompts import prompt_template
# Teaching styles offered in the sidebar; index 0 is the default on first load.
learning_styles = ['traditional', 'Inquiry-based', 'Socratic']
# Display name -> language code, used both for the `translate` package and for
# Vectara's summary_response_lang.
languages = {'English': 'en', 'Spanish': 'es', 'French': 'fr', 'German': 'de', 'Arabic': 'ar', 'Chinese': 'zh-cn',
'Hebrew': 'he', 'Hindi': 'hi', 'Italian': 'it', 'Japanese': 'ja', 'Korean': 'ko', 'Portuguese': 'pt'}
# Greeting seeded into the chat history; translated to the selected language in reset().
initial_prompt = "How can I help you today?"
def launch_bot():
    """Streamlit entry point for the Justice Harvard e-learning assistant.

    Renders the sidebar controls (teaching style, language, student age),
    maintains the chat history in ``st.session_state``, and drives a ReAct
    agent that combines a Vectara RAG tool with a response-rephrasing tool.
    Reads VECTARA_CUSTOMER_ID / VECTARA_CORPUS_ID / VECTARA_API_KEY from the
    environment on first run.
    """

    def reset():
        """Rebuild chat history, translated UI prompts and the ReAct agent
        from the current configuration in ``st.session_state.cfg``."""
        cfg = st.session_state.cfg
        llm = OpenAI(model="gpt-4o", temperature=0)
        tr_prompt = Translator(to_lang=languages[cfg.language]).translate(initial_prompt)
        # NOTE(review): avatar strings look like mojibake-encoded emoji — confirm
        # the file's original encoding before changing them.
        st.session_state.messages = [{"role": "assistant", "content": tr_prompt, "avatar": "π¦"}]
        st.session_state.thinking_prompt = Translator(to_lang=languages[cfg.language]).translate("Thinking...")
        vectara = VectaraIndex(vectara_api_key=cfg.api_key,
                               vectara_customer_id=cfg.customer_id,
                               vectara_corpus_id=cfg.corpus_id)

        # Create tool to adapt output to style, age and language
        def adjust_response_to_student(
            # BUG FIX: 'descrition' -> 'description'; the misspelled keyword meant
            # pydantic.Field silently dropped the description for this parameter.
            text: str = Field(description='the original text'),
            age: int = Field(description='the age of the student. An integer'),
            style: str = Field(description='teaching style'),
            language: str = Field(description='the language')
        ) -> str:
            """
            Rephrase the text to match the student's age, desired teaching style and language
            """
            llm = OpenAI(model="gpt-4o", temperature=0)
            print(f"DEBUG: Adjusting response to student age {age}, style {style} and language {language}")
            # BUG FIX: corrected prompt typos ('is response' -> 'is the response',
            # 'repsonding' -> 'responding') so the instruction reads correctly.
            prompt = f'''
            The following is the response the teacher is planning to provide to a student based on their question.
            Please adjust the response to match the student's age of {age}, the {style} teaching style.
            For example, in the inquiry-based teaching style, choose to ask questions that encourage the student to think critically instead of responding directly with the answer.
            Or in the socratic teaching style, choose to ask questions that lead the student to the answer.
            Always respond in the {language} language.
            original response: {text}
            adjusted response:
            '''
            response = llm.complete(prompt)
            # BUG FIX: llm.complete() returns a CompletionResponse, not a str;
            # convert so the declared -> str return type actually holds.
            return str(response)

        # Create the Vectara RAG tool
        vectara_tool = QueryEngineTool(
            query_engine=vectara.as_query_engine(
                summary_enabled=True, summary_num_results=10,
                summary_response_lang=languages[cfg.language],
                summary_prompt_name="vectara-summary-ext-24-05-large",
                vectara_query_mode="mmr", rerank_k=50, mmr_diversity_bias=0.1,
                n_sentence_before=5, n_sentence_after=5),
            metadata=ToolMetadata(name="vectara",
                                  description="""
                A tool that is able to answer questions about the justice, morality, politics and related topics.
                Based on transcripts of recordings from the Justice Harvard class that includes a lot of content on these topics.
                When using the tool it's best to ask simple short questions.
                """),
        )
        rephrase_tool = FunctionTool.from_defaults(adjust_response_to_student)

        # Create the agent: fill the system-prompt template with current settings.
        prompt = prompt_template.replace("{style}", cfg.style) \
                                .replace("{language}", cfg.language) \
                                .replace("{student_age}", str(cfg.student_age))
        st.session_state.agent = ReActAgent.from_tools(
            tools=[vectara_tool, rephrase_tool], llm=llm,
            verbose=True,
            react_chat_formatter=ReActChatFormatter(system_header=prompt)
        )

    # First run: build the config from environment variables and initialize.
    if 'cfg' not in st.session_state:
        cfg = OmegaConf.create({
            'customer_id': str(os.environ['VECTARA_CUSTOMER_ID']),
            'corpus_id': str(os.environ['VECTARA_CORPUS_ID']),
            'api_key': str(os.environ['VECTARA_API_KEY']),
            'style': learning_styles[0],
            'language': 'English',
            'student_age': 18
        })
        st.session_state.cfg = cfg
        st.session_state.style = cfg.style
        st.session_state.language = cfg.language
        st.session_state.student_age = cfg.student_age
        reset()

    cfg = st.session_state.cfg
    st.set_page_config(page_title="Teaching Assistant", layout="wide")

    # left side content
    with st.sidebar:
        image = Image.open('Vectara-logo.png')
        st.image(image, width=250)
        st.markdown("## Welcome to the Justice Harvard e-learning assistant demo.\n\n\n")

        st.markdown("\n")
        cfg.style = st.selectbox('Learning Style:', learning_styles)
        if st.session_state.style != cfg.style:
            st.session_state.style = cfg.style
            reset()

        st.markdown("\n")
        cfg.language = st.selectbox('Language:', languages.keys())
        if st.session_state.language != cfg.language:
            # BUG FIX: was 'st.session_state.langage' (typo), so the stored value
            # never updated and reset() fired on every rerun after a language change.
            st.session_state.language = cfg.language
            reset()

        st.markdown("\n")
        cfg.student_age = st.number_input(
            'Student age:', min_value=13, value=cfg.student_age,
            step=1, format='%i'
        )
        if st.session_state.student_age != cfg.student_age:
            st.session_state.student_age = cfg.student_age
            reset()

        st.markdown("\n\n")
        if st.button('Start Over'):
            reset()

        st.markdown("---")
        st.markdown(
            "## How this works?\n"
            "This app was built with [Vectara](https://vectara.com).\n\n"
            "It demonstrates the use of Agentic Chat functionality with Vectara"
        )
        st.markdown("---")

    if "messages" not in st.session_state.keys():
        reset()

    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"], avatar=message["avatar"]):
            st.write(message["content"])

    # User-provided prompt
    if prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": 'π§βπ»'})
        with st.chat_message("user", avatar='π§βπ»'):
            print_text(f"Starting new question: {prompt}\n", color='green')
            st.write(prompt)

    # Generate a new response if last message is not from assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant", avatar='π€'):
            with st.spinner(st.session_state.thinking_prompt):
                res = st.session_state.agent.chat(prompt)
                # Strip inline citation markers like [1] before display.
                cleaned = re.sub(r'\[\d+\]', '', res.response)
                st.write(cleaned)
                message = {"role": "assistant", "content": cleaned, "avatar": 'π€'}
                st.session_state.messages.append(message)
    sys.stdout.flush()
# Script entry point: launch the Streamlit app.
if __name__ == "__main__":
    launch_bot()
|