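"""
Streamlit demo: a Justice Harvard e-learning teaching assistant.

The app exposes a Vectara corpus (transcripts of the Justice Harvard class) as a
LlamaIndex query-engine tool and wraps it in a ReActAgent, so the assistant can
answer questions about justice, morality, politics, and related topics in the
learning style, language, and student age selected in the sidebar.

Requires the VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID, and VECTARA_API_KEY
environment variables, plus an OpenAI API key for the gpt-4o agent LLM.
Run with: streamlit run <this_file>.py
"""
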
from omegaconf import OmegaConf
import streamlit as st
import os
from PIL import Image
import re
from translate import Translator


from llama_index.indices.managed.vectara import VectaraIndex
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.core.utils import print_text
from llama_index.core.agent.react.formatter import ReActChatFormatter

from prompts import prompt_template

# Learning styles offered in the sidebar, and supported UI languages mapped to the
# language codes used by the translator and the Vectara summarizer.
learning_styles = ['Traditional', 'Inquiry-based', 'Socratic']
languages = {'English': 'en', 'Spanish': 'es', 'French': 'fr', 'German': 'de', 'Arabic': 'ar', 'Chinese': 'zh-cn',
             'Hebrew': 'he', 'Hindi': 'hi', 'Italian': 'it', 'Japanese': 'ja', 'Korean': 'ko', 'Portuguese': 'pt'}
initial_prompt = "How can I help you today?"


def launch_bot():
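    """
    Render the app: a sidebar for choosing learning style, language, and student age,
    plus a chat interface driven by a LlamaIndex ReActAgent over the Vectara corpus.
    """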
    def reset():
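        """
        Rebuild the chat state from the current configuration: reset the message history,
        translate the UI strings, reconnect to the Vectara index, and re-create the ReAct
        agent. Called on first load and whenever a sidebar setting changes.
        """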
        cfg = st.session_state.cfg
        llm = OpenAI(model="gpt-4o", temperature=0)        
        tr_prompt = Translator(to_lang=languages[cfg.language]).translate(initial_prompt)
        st.session_state.messages = [{"role": "assistant", "content": tr_prompt, "avatar": "πŸ¦–"}]
        st.session_state.thinking_prompt = Translator(to_lang=languages[cfg.language]).translate("Thinking...")
        vectara = VectaraIndex(vectara_api_key=cfg.api_key, 
                               vectara_customer_id=cfg.customer_id, 
                               vectara_corpus_id=cfg.corpus_id)

        # Create the Vectara tool
        vectara_tool = QueryEngineTool(
            query_engine=vectara.as_query_engine(
                summary_enabled=True, summary_num_results=10, summary_response_lang=languages[cfg.language],
                summary_prompt_name="vectara-summary-ext-24-05-large",
                vectara_query_mode="mmr", rerank_k=50, mmr_diversity_bias=0.1,
                n_sentence_before=5, n_sentence_after=5,
            ),
            metadata=ToolMetadata(
                name="vectara",
                description=(
                    "A tool that can answer questions about justice, morality, politics, and related topics. "
                    "It is based on transcripts of recordings from the Justice Harvard class, which covers "
                    "these topics in depth. When using the tool, it is best to ask simple, short questions."
                ),
            ),
        )

        # Create the agent
        prompt = prompt_template.replace("{style}", cfg.style) \
                                .replace("{language}", cfg.language) \
                                .replace("{student_age}", str(cfg.student_age))
        
        print(prompt)  # log the resolved system prompt for debugging
        st.session_state.agent = ReActAgent.from_tools(
            tools=[vectara_tool], llm=llm,
            verbose=True,
            react_chat_formatter=ReActChatFormatter(system_header=prompt)
        )


    # First run: build the default configuration from environment variables and initialize the agent
    if 'cfg' not in st.session_state:
        cfg = OmegaConf.create({
            'customer_id': str(os.environ['VECTARA_CUSTOMER_ID']),
            'corpus_id': str(os.environ['VECTARA_CORPUS_ID']),
            'api_key': str(os.environ['VECTARA_API_KEY']),
            'style': learning_styles[0],
            'language': 'English',
            'student_age': 18
        })
        st.session_state.cfg = cfg
        st.session_state.style = cfg.style
        st.session_state.language = cfg.language
        st.session_state.student_age = cfg.student_age
        reset()

    cfg = st.session_state.cfg
    st.set_page_config(page_title="Teaching Assistant", layout="wide")

    # Sidebar: logo and configuration controls
    with st.sidebar:
        image = Image.open('Vectara-logo.png')
        st.image(image, width=250)
        st.markdown("## Welcome to the Justice Harvard e-learning assistant demo.\n\n\n")

        st.markdown("\n")
        cfg.style = st.selectbox('Learning Style:', learning_styles)
        if st.session_state.style != cfg.style:
            st.session_state.style = cfg.style
            reset()

        st.markdown("\n")
        cfg.language = st.selectbox('Language:', languages.keys())
        if st.session_state.language != cfg.language:
            st.session_state.language = cfg.language
            reset()

        st.markdown("\n") 
        cfg.student_age = st.number_input(
            'Student age:',  min_value=13, value=cfg.student_age,
            step=1, format='%i'
        )
        if st.session_state.student_age != cfg.student_age:
            st.session_state.student_age = cfg.student_age
            reset()

        st.markdown("\n\n")
        if st.button('Start Over'):
            reset()

        st.markdown("---")
        st.markdown(
            "## How this works?\n"
            "This app was built with [Vectara](https://vectara.com).\n\n"
            "It demonstrates the use of Agentic Chat functionality with Vectara"
        )
        st.markdown("---")


    if "messages" not in st.session_state.keys():
        reset()

    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"], avatar=message["avatar"]):
            st.write(message["content"])

    # User-provided prompt
    if prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": 'πŸ§‘β€πŸ’»'})
        with st.chat_message("user", avatar='πŸ§‘β€πŸ’»'):
            print_text(f"Starting new question: {prompt}\n", color='green')
            st.write(prompt)

    # Generate a new response if last message is not from assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant", avatar='πŸ€–'):
            with st.spinner(st.session_state.thinking_prompt):
                res = st.session_state.agent.chat(prompt)
                cleaned = re.sub(r'\[\d+\]', '', res.response)
                st.write(cleaned)
            message = {"role": "assistant", "content": cleaned, "avatar": 'πŸ€–'}
            st.session_state.messages.append(message)
    
if __name__ == "__main__":
    launch_bot()