added sample questions
- app.py +36 -8
- requirements.txt +2 -1
app.py
CHANGED
@@ -1,10 +1,12 @@
 
-from omegaconf import OmegaConf
-import streamlit as st
 import os
 from PIL import Image
 import sys
 
+from omegaconf import OmegaConf
+import streamlit as st
+from streamlit_pills import pills
+
 from dotenv import load_dotenv
 load_dotenv(override=True)
 
@@ -18,7 +20,6 @@ languages = {'English': 'en', 'Spanish': 'es', 'French': 'fr', 'German': 'de', '
              'Hebrew': 'he', 'Hindi': 'hi', 'Italian': 'it', 'Japanese': 'ja', 'Korean': 'ko', 'Portuguese': 'pt'}
 initial_prompt = "How can I help you today?"
 
-
 def create_tools(cfg):
 
     def adjust_response_to_student(
@@ -108,6 +109,16 @@ def initialize_agent(_cfg):
 def toggle_logs():
     st.session_state.show_logs = not st.session_state.show_logs
 
+def show_example_questions():
+    if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
+        selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
+        if selected_example:
+            st.session_state.ex_prompt = selected_example
+            st.session_state.first_turn = False
+            return True
+    return False
+
+
 def launch_bot():
     def reset():
         cfg = st.session_state.cfg
@@ -116,6 +127,7 @@ def launch_bot():
         st.session_state.agent = initialize_agent(cfg)
         st.session_state.log_messages = []
         st.session_state.prompt = None
+        st.session_state.first_turn = True
         st.session_state.show_logs = False
 
     st.set_page_config(page_title="Justice Harvard Teaching Assistant", layout="wide")
@@ -126,15 +138,18 @@ def launch_bot():
             'api_key': str(os.environ['VECTARA_API_KEY']),
             'style': teaching_styles[0],
             'language': 'English',
-            'student_age': 18
-
+            'student_age': 18,
+            'examples': os.environ.get('QUERY_EXAMPLES', None)
         })
         st.session_state.cfg = cfg
         st.session_state.style = cfg.style
         st.session_state.language = cfg.language
         st.session_state.student_age = cfg.student_age
-
+        st.session_state.ex_prompt = None
+        example_messages = [example.strip() for example in cfg.examples.split(",")] if cfg.examples else []
+        st.session_state.example_messages = [em for em in example_messages if len(em)>0]
         reset()
+
     cfg = st.session_state.cfg
 
     # left side content
@@ -187,8 +202,18 @@ def launch_bot():
         with st.chat_message(message["role"], avatar=message["avatar"]):
            st.write(message["content"])
 
+    example_container = st.empty()
+    with example_container:
+        if show_example_questions():
+            example_container.empty()
+            st.rerun()
+
     # User-provided prompt
-    if prompt := st.chat_input():
+    if st.session_state.ex_prompt:
+        prompt = st.session_state.ex_prompt
+    else:
+        prompt = st.chat_input()
+    if prompt:
         st.session_state.messages.append({"role": "user", "content": prompt, "avatar": '🧑‍💻'})
         st.session_state.prompt = prompt  # Save the prompt in session state
         st.session_state.log_messages = []
@@ -196,6 +221,7 @@ def launch_bot():
         with st.chat_message("user", avatar='🧑‍💻'):
             print(f"Starting new question: {prompt}\n")
             st.write(prompt)
+        st.session_state.ex_prompt = None
 
     # Generate a new response if last message is not from assistant
     if st.session_state.prompt:
@@ -206,7 +232,9 @@ def launch_bot():
             message = {"role": "assistant", "content": res, "avatar": '🤖'}
             st.session_state.messages.append(message)
             st.markdown(res)
-
+            st.session_state.ex_prompt = None
+            st.session_state.prompt = None
+            st.rerun()
 
         log_placeholder = st.empty()
         with log_placeholder.container():
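Note on the change above: the example questions come in through the QUERY_EXAMPLES environment variable, get rendered as clickable pills on the first turn via streamlit_pills, and a clicked pill is pushed through the normal chat path as if the user had typed it. A minimal, standalone sketch of that pattern (this is not the app itself; the file name and the fallback behavior are placeholders for illustration):

# sketch_pills_demo.py -- standalone illustration only; run with `streamlit run sketch_pills_demo.py`
import os

import streamlit as st
from streamlit_pills import pills

# Same parsing idea as in app.py: QUERY_EXAMPLES is a comma-separated list of questions.
examples = [e.strip() for e in os.environ.get("QUERY_EXAMPLES", "").split(",") if e.strip()]

if "first_turn" not in st.session_state:
    st.session_state.first_turn = True
    st.session_state.ex_prompt = None

# Show the example questions only before the first user turn.
if examples and st.session_state.first_turn:
    selected = pills("Queries to Try:", examples, index=None)
    if selected:
        st.session_state.ex_prompt = selected
        st.session_state.first_turn = False
        st.rerun()

typed = st.chat_input("Ask a question")
prompt = st.session_state.ex_prompt or typed  # a clicked pill takes priority over typed input
if prompt:
    st.session_state.ex_prompt = None
    with st.chat_message("user"):
        st.write(prompt)
    with st.chat_message("assistant"):
        st.write(f"(placeholder answer to: {prompt})")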
requirements.txt
CHANGED
@@ -1,5 +1,6 @@
 omegaconf==2.3.0
-streamlit==1.32.2
 pydantic==1.10.15
 python-dotenv==1.0.1
+streamlit==1.32.2
+streamlit_pills==0.3.0
 git+https://{GITHUB_TOKEN}@github.com/vectara/vectara-agent.git
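Deployment note: the new streamlit_pills==0.3.0 dependency backs the "Queries to Try" pills, and the questions themselves are expected in a QUERY_EXAMPLES environment variable (for a Space, typically set as a variable or secret). A quick check of the comma-separated format app.py now parses (the sample questions here are hypothetical, not taken from the Space's configuration):

import os

# Hypothetical value; in the Space this would be configured as an environment variable.
os.environ["QUERY_EXAMPLES"] = "What is the trolley problem?, What does Kant mean by the categorical imperative?"

# Equivalent to the split/strip/filter logic the commit adds to app.py.
examples = [e.strip() for e in os.environ["QUERY_EXAMPLES"].split(",") if e.strip()]
print(examples)
# ['What is the trolley problem?', 'What does Kant mean by the categorical imperative?']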