File size: 4,076 Bytes
30d349c
 
 
 
 
1f13f87
30d349c
4f860ce
 
e5c2179
 
4f860ce
 
30d349c
4f860ce
 
30d349c
 
4b1db34
 
cddbc52
4b1db34
30d349c
 
 
4f860ce
30d349c
 
 
 
 
 
4b1db34
 
 
 
30d349c
 
 
 
 
 
 
1f13f87
 
824d371
 
 
cddbc52
824d371
4b1db34
 
 
30d349c
4f860ce
 
 
4b1db34
 
 
4f860ce
 
 
 
4b1db34
 
 
 
 
 
 
4f860ce
30d349c
 
 
4f860ce
2f3e659
30d349c
 
 
4b1db34
30d349c
4b1db34
30d349c
 
 
4f860ce
30d349c
 
 
 
4f860ce
 
30d349c
4b1db34
30d349c
 
4f860ce
 
966dd34
 
4f860ce
30d349c
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107

from omegaconf import OmegaConf
from query import VectaraQuery
import streamlit as st
import os
from PIL import Image

# Debate topics offered in the sidebar, each mapped to the Vectara corpus ID
# that holds the supporting material for that topic.
topics = dict([
    ('Standardized testing in education', '266'),
    ('Ethical implications of genetic editing', '267'),
    ('Social media and mental health', '268'),
])

def launch_bot():
    """Render the Debate Bot Streamlit app.

    Builds the sidebar controls (human role, topic, debate style), keeps the
    chat transcript in ``st.session_state.messages``, and streams the bot's
    rebuttal via the ``VectaraQuery`` client stored in ``st.session_state.vq``.
    Re-executed top-to-bottom by Streamlit on every user interaction.
    """
    def generate_response(question, cfg):
        # Always read the client from session state: reset() replaces
        # st.session_state.vq, so a local reference captured earlier in the
        # same rerun would go stale and reuse the discarded client.
        return st.session_state.vq.submit_query(question, cfg.bot_role, cfg.topic, cfg.style)

    def reset():
        # Start a fresh debate: seed the transcript with the bot's opening
        # prompt and rebuild the query client so any chat history it holds
        # is discarded. Uses cfg.corpus_id, which the topic selector keeps
        # in sync with the chosen topic.
        st.session_state.messages = [{"role": "assistant", "content": "Please make your opening statement.", "avatar": 'πŸ¦–'}]
        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_id, cfg.prompt_name)

    # One-time initialization of config and client; survives reruns.
    if 'cfg' not in st.session_state:
        cfg = OmegaConf.create({
            'customer_id': str(os.environ['VECTARA_CUSTOMER_ID']),
            # Default to the first topic's corpus until the user picks one.
            'corpus_id': list(topics.values())[0],
            'api_key': str(os.environ['VECTARA_API_KEY']),
            'prompt_name': 'vectara-experimental-summary-ext-2023-12-11-large',
        })
        st.session_state.cfg = cfg
        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_id, cfg.prompt_name)

        # Sentinels so the first selectbox render triggers an initial reset().
        st.session_state.current_human_role = None
        st.session_state.current_topic = None
        st.session_state.style = None

    cfg = st.session_state.cfg
    st.set_page_config(page_title="Debate Bot", layout="wide")

    # Left-side controls.
    with st.sidebar:
        image = Image.open('Vectara-logo.png')
        st.image(image, width=250)
        st.markdown("## Welcome to Debate Bot.\n\n\n")

        role_options = ['in opposition to', 'in support of']
        cfg.human_role = st.selectbox('You (the human) are:', role_options)
        # The bot always argues the opposite side of the human.
        cfg.bot_role = role_options[1] if cfg.human_role == role_options[0] else role_options[0]
        if st.session_state.current_human_role != cfg.human_role:
            st.session_state.current_human_role = cfg.human_role
            reset()

        topic_options = list(topics.keys())
        cfg.topic = st.selectbox('The topic:', topic_options)
        # Keep cfg in sync so reset() rebuilds the client against the corpus
        # for the currently selected topic, not the initial default.
        cfg.corpus_id = topics[cfg.topic]
        st.session_state.vq.corpus_id = cfg.corpus_id
        if st.session_state.current_topic != cfg.topic:
            st.session_state.current_topic = cfg.topic
            reset()

        st.markdown("\n")
        debate_styles = ['Lincoln-Douglas', 'Spontaneous Argumentation', 'Parliamentary debates']
        cfg.style = st.selectbox('Debate Style:', debate_styles)
        if st.session_state.style != cfg.style:
            st.session_state.style = cfg.style
            reset()

        st.markdown("\n\n")
        if st.button('Start Over'):
            reset()

        st.markdown("---")
        st.markdown(
            "## How this works?\n"
            "This app was built with [Vectara](https://vectara.com).\n\n"
            "It demonstrates the use of the Chat functionality along with custom prompts and GPT4-Turbo (as part of our [Scale plan](https://vectara.com/pricing/))"
        )
        st.markdown("---")

    # Safety net: ensure a transcript exists even if the resets above didn't fire.
    if "messages" not in st.session_state:
        reset()

    # Replay the transcript so far.
    for message in st.session_state.messages:
        with st.chat_message(message["role"], avatar=message["avatar"]):
            st.write(message["content"])

    # Record and echo the user's new argument, if any.
    if prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": 'πŸ§‘β€πŸ’»'})
        with st.chat_message("user", avatar='πŸ§‘β€πŸ’»'):
            st.write(prompt)

    # Generate a reply whenever the transcript ends on a user turn. Use the
    # recorded last message (not the walrus `prompt`, which is None on reruns
    # where no new input was submitted) and keep the network call inside the
    # spinner so the user sees feedback while waiting for the first token.
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant", avatar='πŸ€–'):
            with st.spinner('Thinking...'):
                stream = generate_response(st.session_state.messages[-1]["content"], cfg)
                response = st.write_stream(stream)
            message = {"role": "assistant", "content": response, "avatar": 'πŸ€–'}
            st.session_state.messages.append(message)
    
# Script entry point: start the Streamlit app when run directly
# (e.g. `streamlit run <this file>`).
if __name__ == "__main__":
    launch_bot()