ofermend commited on
Commit
4f860ce
β€’
1 Parent(s): 824d371
Files changed (2) hide show
  1. app.py +25 -15
  2. query.py +9 -6
app.py CHANGED
@@ -4,20 +4,22 @@ from query import VectaraQuery
4
  import streamlit as st
5
  import os
6
 
 
 
 
 
 
7
  def launch_bot():
8
- def generate_response(question, role, topic):
9
- response = vq.submit_query(question, role, topic)
10
  return response
11
 
12
  if 'cfg' not in st.session_state:
13
  cfg = OmegaConf.create({
14
  'customer_id': str(os.environ['VECTARA_CUSTOMER_ID']),
15
- 'corpus_id': str(os.environ['VECTARA_CORPUS_ID']),
16
  'api_key': str(os.environ['VECTARA_API_KEY']),
17
  'prompt_name': 'vectara-experimental-summary-ext-2023-12-11-large',
18
- 'topic': 'Standardized testing in education',
19
- 'human_role': 'in opposition to',
20
- 'bot_role': 'in support of'
21
  })
22
  st.session_state.cfg = cfg
23
  st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_id, cfg.prompt_name)
@@ -35,34 +37,42 @@ def launch_bot():
35
  cfg.human_role = st.selectbox('You are:', role_options)
36
  cfg.bot_role = role_options[1] if cfg.human_role == role_options[0] else role_options[0]
37
 
38
- st.markdown(f"{cfg.topic}.\n\n")
 
 
 
 
 
 
 
39
  st.markdown("---")
40
  st.markdown(
41
  "## How this works?\n"
42
- "This app was built with [Vectara](https://vectara.com).\n"
 
43
  )
44
  st.markdown("---")
45
 
46
  if "messages" not in st.session_state.keys():
47
- st.session_state.messages = [{"role": "assistant", "content": f"Please make your opening statment."}]
48
 
49
  # Display chat messages
50
  for message in st.session_state.messages:
51
- with st.chat_message(message["role"]):
52
  st.write(message["content"])
53
 
54
  # User-provided prompt
55
  if prompt := st.chat_input():
56
- st.session_state.messages.append({"role": "user", "content": prompt})
57
- with st.chat_message("user"):
58
  st.write(prompt)
59
 
60
  # Generate a new response if last message is not from assistant
61
  if st.session_state.messages[-1]["role"] != "assistant":
62
- with st.chat_message("assistant"):
63
- stream = generate_response(prompt, cfg.bot_role, cfg.topic)
64
  response = st.write_stream(stream)
65
- message = {"role": "assistant", "content": response}
66
  st.session_state.messages.append(message)
67
 
68
  if __name__ == "__main__":
 
4
  import streamlit as st
5
  import os
6
 
7
+ topics = {
8
+ 'Standardized testing in education': '266',
9
+ 'Ethical implications of genetic editing': '267'
10
+ }
11
+
12
  def launch_bot():
13
+ def generate_response(question, cfg):
14
+ response = vq.submit_query(question, cfg.bot_role, cfg.topic, cfg.style)
15
  return response
16
 
17
  if 'cfg' not in st.session_state:
18
  cfg = OmegaConf.create({
19
  'customer_id': str(os.environ['VECTARA_CUSTOMER_ID']),
20
+ 'corpus_id': list(topics.values())[0],
21
  'api_key': str(os.environ['VECTARA_API_KEY']),
22
  'prompt_name': 'vectara-experimental-summary-ext-2023-12-11-large',
 
 
 
23
  })
24
  st.session_state.cfg = cfg
25
  st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_id, cfg.prompt_name)
 
37
  cfg.human_role = st.selectbox('You are:', role_options)
38
  cfg.bot_role = role_options[1] if cfg.human_role == role_options[0] else role_options[0]
39
 
40
+ topic_options = list(topics.keys())
41
+ cfg.topic = st.selectbox('The topic:', topic_options)
42
+ vq.corpus_id = topics[cfg.topic]
43
+
44
+ st.markdown("\n")
45
+ debate_styles = ['Lincoln-Douglas', 'Spontaneous Argumentation', 'Parliamentary debates']
46
+ cfg.style = st.selectbox('Debate Style:', debate_styles)
47
+
48
  st.markdown("---")
49
  st.markdown(
50
  "## How this works?\n"
51
+ "This app was built with [Vectara](https://vectara.com).\n\n"
52
+ "It demonstrates the use of the Chat functionality along with custom prompts and GPT4-Turbo (as part of our Scale plan)"
53
  )
54
  st.markdown("---")
55
 
56
  if "messages" not in st.session_state.keys():
57
+ st.session_state.messages = [{"role": "assistant", "content": f"Please make your opening statement.", "avatar": 'πŸ¦–'}]
58
 
59
  # Display chat messages
60
  for message in st.session_state.messages:
61
+ with st.chat_message(message["role"], avatar=message["avatar"]):
62
  st.write(message["content"])
63
 
64
  # User-provided prompt
65
  if prompt := st.chat_input():
66
+ st.session_state.messages.append({"role": "user", "content": prompt, "avatar": 'πŸ§‘β€πŸ’»'})
67
+ with st.chat_message("user", avatar='πŸ§‘β€πŸ’»'):
68
  st.write(prompt)
69
 
70
  # Generate a new response if last message is not from assistant
71
  if st.session_state.messages[-1]["role"] != "assistant":
72
+ with st.chat_message("assistant", avatar='πŸ€–'):
73
+ stream = generate_response(prompt, cfg)
74
  response = st.write_stream(stream)
75
+ message = {"role": "assistant", "content": response, "avatar": 'πŸ€–'}
76
  st.session_state.messages.append(message)
77
 
78
  if __name__ == "__main__":
query.py CHANGED
@@ -16,7 +16,7 @@ class VectaraQuery():
16
  self.prompt_name = prompt_name if prompt_name else "vectara-experimental-summary-ext-2023-12-11-large"
17
  self.conv_id = None
18
 
19
- def get_body(self, user_response: str, role: str, topic: str):
20
  corpora_key_list = [{
21
  'customer_id': self.customer_id, 'corpus_id': self.corpus_id, 'lexical_interpolation_config': {'lambda': 0.025}
22
  }]
@@ -25,7 +25,9 @@ class VectaraQuery():
25
  [
26
  {{
27
  "role": "system",
28
- "content": "You are a professional debate bot. You are provided with search results related to {topic}
 
 
29
  and respond to the previous arguments made so far. Be sure to provide a thoughtful and convincing reply.
30
  Never mention search results explicitly in your response.
31
  Do not base your response on information or knowledge that is not in the search results.
@@ -41,9 +43,10 @@ class VectaraQuery():
41
  }},
42
  {{
43
  "role": "user",
44
- "content": "provide a convincing reply {role} {topic}.
45
  Consider the search results as relevant information with which to form your response.
46
- Do not repeat earlier arguments and make sure your new response is coherent with the previous arguments, and responsive to the last argument: {user_response}."
 
47
  }}
48
  ]
49
  '''
@@ -93,10 +96,10 @@ class VectaraQuery():
93
  "grpc-timeout": "60S"
94
  }
95
 
96
- def submit_query(self, query_str: str, role: str, topic: str):
97
 
98
  endpoint = f"https://api.vectara.io/v1/stream-query"
99
- body = self.get_body(query_str, role, topic)
100
 
101
  response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers(), stream=True)
102
  if response.status_code != 200:
 
16
  self.prompt_name = prompt_name if prompt_name else "vectara-experimental-summary-ext-2023-12-11-large"
17
  self.conv_id = None
18
 
19
+ def get_body(self, user_response: str, role: str, topic: str, style: str):
20
  corpora_key_list = [{
21
  'customer_id': self.customer_id, 'corpus_id': self.corpus_id, 'lexical_interpolation_config': {'lambda': 0.025}
22
  }]
 
25
  [
26
  {{
27
  "role": "system",
28
+ "content": "You are a professional debate bot.
29
+ You specialize in the {style} debate style.
30
+ You are provided with search results related to {topic}
31
  and respond to the previous arguments made so far. Be sure to provide a thoughtful and convincing reply.
32
  Never mention search results explicitly in your response.
33
  Do not base your response on information or knowledge that is not in the search results.
 
43
  }},
44
  {{
45
  "role": "user",
46
+ "content": "provide a convincing reply {role} {topic} in response to the last argument '{user_response}'.
47
  Consider the search results as relevant information with which to form your response.
48
+ Use the {style} debate style to make your argument.
49
+ Do not repeat earlier arguments and make sure your new response is coherent with the previous arguments."
50
  }}
51
  ]
52
  '''
 
96
  "grpc-timeout": "60S"
97
  }
98
 
99
+ def submit_query(self, query_str: str, bot_role: str, topic: str, style: str):
100
 
101
  endpoint = f"https://api.vectara.io/v1/stream-query"
102
+ body = self.get_body(query_str, bot_role, topic, style)
103
 
104
  response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers(), stream=True)
105
  if response.status_code != 200: