ofermend committed on
Commit
1dac99b
1 Parent(s): 0160239

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -38
app.py CHANGED
@@ -7,21 +7,11 @@ import os
7
  import streamlit as st
8
  from PIL import Image
9
 
10
- def generate_response(question):
11
- response = st.session_state.vq.submit_query(question)
12
- return response
13
-
14
- def process_prompt(prompt):
15
- # Your logic to process the prompt and generate response
16
- st.session_state.messages.append({"role": "user", "content": prompt})
17
- with st.chat_message("assistant"):
18
- with st.spinner("Thinking..."):
19
- response = generate_response(prompt)
20
- st.write(response)
21
- message = {"role": "assistant", "content": response}
22
- st.session_state.messages.append(message)
23
 
24
  def launch_bot():
 
 
 
25
 
26
  if 'cfg' not in st.session_state:
27
  corpus_ids = str(os.environ['corpus_ids']).split(',')
@@ -39,13 +29,14 @@ def launch_bot():
39
  st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_ids)
40
 
41
  cfg = st.session_state.cfg
 
42
  st.set_page_config(page_title=cfg.title, layout="wide")
43
 
44
  # left side content
45
  with st.sidebar:
46
  image = Image.open('Vectara-logo.png')
47
  st.markdown(f"## Welcome to {cfg.title}\n\n"
48
- f"With this demo uses Retrieval Augmented Generation to ask questions about {cfg.source_data_desc}\n\n")
49
 
50
  st.markdown("---")
51
  st.markdown(
@@ -62,33 +53,26 @@ def launch_bot():
62
 
63
  if "messages" not in st.session_state.keys():
64
  st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
65
-
66
  # Display chat messages
67
  for message in st.session_state.messages:
68
  with st.chat_message(message["role"]):
69
  st.write(message["content"])
70
 
71
- # Always allow typing in the chat input box
72
- user_input = st.text_input("Type your question here...", key="user_input")
73
-
74
- # Display example questions only during the first round
75
- if len(st.session_state.messages) == 0: # Only show examples in the first round
76
- for example in cfg.examples:
77
- if st.button(example):
78
- user_input = example # Directly use the example as input
79
- # Process the input immediately
80
- response = generate_response(user_input)
81
- st.session_state.messages.append(user_input)
82
- st.session_state.messages.append(response)
83
- st.experimental_rerun() # Rerun to refresh and show the response
84
-
85
- if st.button("Submit"):
86
- if user_input: # Ensure there's something to process
87
- response = generate_response(user_input)
88
- st.session_state.messages.append(user_input)
89
- st.session_state.messages.append(response)
90
- # Clear the input (workaround as direct clearing isn't supported)
91
- st.experimental_rerun() # Rerun to refresh and clear the input box
92
-
93
  if __name__ == "__main__":
94
- launch_bot()
 
7
  import streamlit as st
8
  from PIL import Image
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  def launch_bot():
12
+ def generate_response(question):
13
+ response = vq.submit_query(question)
14
+ return response
15
 
16
  if 'cfg' not in st.session_state:
17
  corpus_ids = str(os.environ['corpus_ids']).split(',')
 
29
  st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_ids)
30
 
31
  cfg = st.session_state.cfg
32
+ vq = st.session_state.vq
33
  st.set_page_config(page_title=cfg.title, layout="wide")
34
 
35
  # left side content
36
  with st.sidebar:
37
  image = Image.open('Vectara-logo.png')
38
  st.markdown(f"## Welcome to {cfg.title}\n\n"
39
+ f"This demo uses Retrieval Augmented Generation to ask questions about {cfg.source_data_desc}\n\n")
40
 
41
  st.markdown("---")
42
  st.markdown(
 
53
 
54
  if "messages" not in st.session_state.keys():
55
  st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
56
+
57
  # Display chat messages
58
  for message in st.session_state.messages:
59
  with st.chat_message(message["role"]):
60
  st.write(message["content"])
61
 
62
+ # User-provided prompt
63
+ if prompt := st.chat_input():
64
+ st.session_state.messages.append({"role": "user", "content": prompt})
65
+ with st.chat_message("user"):
66
+ st.write(prompt)
67
+
68
+ # Generate a new response if last message is not from assistant
69
+ if st.session_state.messages[-1]["role"] != "assistant":
70
+ with st.chat_message("assistant"):
71
+ with st.spinner("Thinking..."):
72
+ response = generate_response(prompt)
73
+ st.write(response)
74
+ message = {"role": "assistant", "content": response}
75
+ st.session_state.messages.append(message)
76
+
 
 
 
 
 
 
 
77
  if __name__ == "__main__":
78
+ launch_bot()