# debate-bot / app.py
# ofermend's picture
# updates
# 4f860ce
# raw
# history blame
# 3.08 kB
from omegaconf import OmegaConf
from query import VectaraQuery
import streamlit as st
import os
# Debate topics offered in the sidebar, mapped to their Vectara corpus IDs.
topics = dict([
    ('Standardized testing in education', '266'),
    ('Ethical implications of genetic editing', '267'),
])
def launch_bot():
    """Render the Debate Bot Streamlit app: sidebar configuration plus the chat loop.

    Reads VECTARA_CUSTOMER_ID and VECTARA_API_KEY from the environment on the
    first run and caches both the config and the VectaraQuery client in
    st.session_state so they survive Streamlit reruns.
    """

    def generate_response(question, cfg):
        # Delegate to the Vectara client; the result is a stream that
        # st.write_stream can consume incrementally.
        response = vq.submit_query(question, cfg.bot_role, cfg.topic, cfg.style)
        return response

    # One-time setup: build the config from env vars and create the query client.
    if 'cfg' not in st.session_state:
        cfg = OmegaConf.create({
            'customer_id': str(os.environ['VECTARA_CUSTOMER_ID']),
            # Default to the first topic's corpus until the user picks one.
            'corpus_id': list(topics.values())[0],
            'api_key': str(os.environ['VECTARA_API_KEY']),
            'prompt_name': 'vectara-experimental-summary-ext-2023-12-11-large',
        })
        st.session_state.cfg = cfg
        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_id, cfg.prompt_name)

    cfg = st.session_state.cfg
    vq = st.session_state.vq

    st.set_page_config(page_title="Debate Bot", layout="wide")

    # Sidebar: role, topic, and debate-style selection.
    with st.sidebar:
        st.markdown("## Welcome to Debate Bot.\n\n\n")
        role_options = ['in opposition to', 'in support of']
        cfg.human_role = st.selectbox('You are:', role_options)  # fixed typo: was "Your are:"
        # The bot always argues the side the human did not pick.
        cfg.bot_role = role_options[1] if cfg.human_role == role_options[0] else role_options[0]

        topic_options = list(topics.keys())
        cfg.topic = st.selectbox('The topic:', topic_options)
        # Point the query client at the corpus backing the chosen topic.
        vq.corpus_id = topics[cfg.topic]
        st.markdown("\n")
        debate_styles = ['Lincoln-Douglas', 'Spontaneous Argumentation', 'Parliamentary debates']
        cfg.style = st.selectbox('Debate Style:', debate_styles)

        st.markdown("---")
        st.markdown(
            "## How this works?\n"
            "This app was built with [Vectara](https://vectara.com).\n\n"
            "It demonstrates the use of the Chat functionality along with custom prompts and GPT4-Turbo (as part of our Scale plan)"
        )
        st.markdown("---")

    # Seed the conversation with the assistant's opening prompt.
    if "messages" not in st.session_state:
        st.session_state.messages = [{"role": "assistant", "content": "Please make your opening statement.", "avatar": 'πŸ¦–'}]

    # Replay the chat history on every rerun.
    for message in st.session_state.messages:
        with st.chat_message(message["role"], avatar=message["avatar"]):
            st.write(message["content"])

    # Capture a new user turn, if any.
    if prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": 'πŸ§‘β€πŸ’»'})
        with st.chat_message("user", avatar='πŸ§‘β€πŸ’»'):
            st.write(prompt)

    # Generate a reply only when the user spoke last.
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant", avatar='πŸ€–'):
            stream = generate_response(prompt, cfg)
            response = st.write_stream(stream)
        message = {"role": "assistant", "content": response, "avatar": 'πŸ€–'}
        st.session_state.messages.append(message)
# Script entry point: launch the Streamlit app when run directly.
if __name__ == "__main__":
    launch_bot()