ofermend committed
Commit e6d4ed8 • 1 Parent(s): c9b5ada

version bump

Files changed (4)
  1. agent.py +2 -9
  2. app.py +3 -147
  3. requirements.txt +1 -1
  4. st_app.py +163 -0
agent.py CHANGED
@@ -4,7 +4,7 @@ from pydantic import Field, BaseModel
 from omegaconf import OmegaConf
 
 from llama_index.core.utilities.sql_wrapper import SQLDatabase
-from sqlalchemy import create_engine, text
+from sqlalchemy import create_engine
 
 from dotenv import load_dotenv
 load_dotenv(override=True)
@@ -62,7 +62,7 @@ def create_assistant_tools(cfg):
         tool_args_schema = QueryEVLaws,
         reranker = "multilingual_reranker_v1", rerank_k = 100,
         n_sentences_before = 2, n_sentences_after = 2, lambda_val = 0.005,
-        summary_num_results = 5,
+        summary_num_results = 10,
         vectara_summarizer = summarizer,
         include_citations = False,
     )
@@ -84,16 +84,9 @@ def create_assistant_tools(cfg):
 def initialize_agent(_cfg, update_func=None):
     electric_vehicle_bot_instructions = """
     - You are a helpful research assistant, with expertise in electric vehicles, in conversation with a user.
-    - Before answering any user query, use ev_describe_tables to understand schema of each table, and use get_sample_data
-      to get sample data from each table in the database, so that you can understand NULL and unique values for each column.
     - For a query with multiple sub-questions, break down the query into the sub-questions,
       and make separate calls to the ask_vehicles or ask_policies tool to answer each sub-question,
       then combine the answers to provide a complete response.
-    - Use the database tools (ev_load_data, ev_describe_tables and ev_list_tables) to answer analytical queries.
-      If you cannot find the information in one of the tables, try using the other tables in the database.
-    - IMPORTANT: When using database_tools, always call the ev_load_sample_data tool with the table you want to query
-      to understand the table structure, column naming, and values in the table. Never call the ev_load_data tool for a query until you have called ev_load_sample_data.
-    - When providing links, try to put the name of the website or source of information for the displayed text. Don't just say 'Source'.
     - Never discuss politics, and always respond politely.
     """
 
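Two things change in agent.py: the `text` import from sqlalchemy is dropped, and summary_num_results goes from 5 to 10, so the Vectara query tool summarizes over more matched results. For context, the sketch below shows how a RAG tool with these kwargs is typically built with vectara-agentic; the factory construction, credential variable names, and summarizer string are illustrative assumptions, not part of this commit.

# Hedged sketch (not from this commit): a Vectara RAG tool built with the same kwargs
# that agent.py passes inside create_assistant_tools(). Credential variable names and
# the summarizer string are assumptions for illustration.
import os
from pydantic import BaseModel, Field
from vectara_agentic.tools import VectaraToolFactory

class QueryEVLaws(BaseModel):
    query: str = Field(description="A question about electric-vehicle policies or laws.")

vec_factory = VectaraToolFactory(
    vectara_api_key=os.environ["VECTARA_API_KEY"],          # assumed env var names
    vectara_customer_id=os.environ["VECTARA_CUSTOMER_ID"],
    vectara_corpus_id=os.environ["VECTARA_CORPUS_ID"],
)

ask_policies = vec_factory.create_rag_tool(
    tool_name="ask_policies",
    tool_description="Answers questions about EV policies, laws, and incentives.",
    tool_args_schema=QueryEVLaws,
    reranker="multilingual_reranker_v1", rerank_k=100,
    n_sentences_before=2, n_sentences_after=2, lambda_val=0.005,
    summary_num_results=10,   # raised from 5 in this commit
    vectara_summarizer="vectara-summary-ext-24-05-sml",   # assumed summarizer name
    include_citations=False,
)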
 
app.py CHANGED
@@ -1,165 +1,21 @@
-from PIL import Image
-import sys
 import os
+import streamlit as st
 import uuid
 
-import streamlit as st
-from streamlit_pills import pills
-from streamlit_feedback import streamlit_feedback
+from st_app import launch_bot
 
 import nest_asyncio
 import asyncio
 
-from utils import thumbs_feedback, escape_dollars_outside_latex, send_amplitude_data
-
 import sqlite3
 from datasets import load_dataset
 
-from vectara_agentic.agent import AgentStatusType
-from agent import initialize_agent, get_agent_config
-
-
-initial_prompt = "How can I help you today?"
-
 # Setup for HTTP API Calls to Amplitude Analytics
 if 'device_id' not in st.session_state:
     st.session_state.device_id = str(uuid.uuid4())
 
-
 if "feedback_key" not in st.session_state:
-    st.session_state.feedback_key = 0
-
-def toggle_logs():
-    st.session_state.show_logs = not st.session_state.show_logs
-
-def show_example_questions():
-    if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
-        selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
-        if selected_example:
-            st.session_state.ex_prompt = selected_example
-            st.session_state.first_turn = False
-            return True
-    return False
-
-def update_func(status_type: AgentStatusType, msg: str):
-    if status_type != AgentStatusType.AGENT_UPDATE:
-        output = f"{status_type.value} - {msg}"
-        st.session_state.log_messages.append(output)
-
-async def launch_bot():
-    def reset():
-        st.session_state.messages = [{"role": "assistant", "content": initial_prompt, "avatar": "🦖"}]
-        st.session_state.thinking_message = "Agent at work..."
-        st.session_state.log_messages = []
-        st.session_state.prompt = None
-        st.session_state.ex_prompt = None
-        st.session_state.first_turn = True
-        st.session_state.show_logs = False
-        if 'agent' not in st.session_state:
-            st.session_state.agent = initialize_agent(cfg, update_func=update_func)
-
-    if 'cfg' not in st.session_state:
-        cfg = get_agent_config()
-        st.session_state.cfg = cfg
-        st.session_state.ex_prompt = None
-        example_messages = [example.strip() for example in cfg.examples.split(";")] if cfg.examples else []
-        st.session_state.example_messages = [em for em in example_messages if len(em)>0]
-        reset()
-
-    cfg = st.session_state.cfg
-
-    # left side content
-    with st.sidebar:
-        image = Image.open('Vectara-logo.png')
-        st.image(image, width=175)
-        st.markdown(f"## {cfg['demo_welcome']}")
-        st.markdown(f"{cfg['demo_description']}")
-
-        st.markdown("\n\n")
-        bc1, _ = st.columns([1, 1])
-        with bc1:
-            if st.button('Start Over'):
-                reset()
-                st.rerun()
-
-        st.divider()
-        st.markdown(
-            "## How this works?\n"
-            "This app was built with [Vectara](https://vectara.com).\n\n"
-            "It demonstrates the use of Agentic RAG functionality with Vectara"
-        )
-
-    if "messages" not in st.session_state.keys():
-        reset()
-
-    # Display chat messages
-    for message in st.session_state.messages:
-        with st.chat_message(message["role"], avatar=message["avatar"]):
-            st.write(message["content"])
-
-    example_container = st.empty()
-    with example_container:
-        if show_example_questions():
-            example_container.empty()
-            st.session_state.first_turn = False
-            st.rerun()
-
-    # User-provided prompt
-    if st.session_state.ex_prompt:
-        prompt = st.session_state.ex_prompt
-    else:
-        prompt = st.chat_input()
-    if prompt:
-        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": '🧑‍💻'})
-        st.session_state.prompt = prompt  # Save the prompt in session state
-        st.session_state.log_messages = []
-        st.session_state.show_logs = False
-        with st.chat_message("user", avatar='🧑‍💻'):
-            print(f"Starting new question: {prompt}\n")
-            st.write(prompt)
-        st.session_state.ex_prompt = None
-
-    # Generate a new response if last message is not from assistant
-    if st.session_state.prompt:
-        with st.chat_message("assistant", avatar='🤖'):
-            with st.spinner(st.session_state.thinking_message):
-                res = st.session_state.agent.chat(st.session_state.prompt)
-                res = escape_dollars_outside_latex(res)
-            message = {"role": "assistant", "content": res, "avatar": '🤖'}
-            st.session_state.messages.append(message)
-            st.markdown(res)
-
-        send_amplitude_data(
-            user_query=st.session_state.messages[-2]["content"],
-            bot_response=st.session_state.messages[-1]["content"],
-            demo_name=cfg['demo_name']
-        )
-
-        st.session_state.ex_prompt = None
-        st.session_state.prompt = None
-        st.session_state.first_turn = False
-        st.rerun()
-
-    # Record user feedback
-    if (st.session_state.messages[-1]["role"] == "assistant") & (st.session_state.messages[-1]["content"] != initial_prompt):
-        streamlit_feedback(
-            feedback_type="thumbs", on_submit = thumbs_feedback, key = st.session_state.feedback_key,
-            kwargs = {"user_query": st.session_state.messages[-2]["content"],
-                      "bot_response": st.session_state.messages[-1]["content"],
-                      "demo_name": cfg["demo_name"]}
-        )
-
-    log_placeholder = st.empty()
-    with log_placeholder.container():
-        if st.session_state.show_logs:
-            st.button("Hide Logs", on_click=toggle_logs)
-            for msg in st.session_state.log_messages:
-                st.text(msg)
-        else:
-            if len(st.session_state.log_messages) > 0:
-                st.button("Show Logs", on_click=toggle_logs)
-
-    sys.stdout.flush()
+    st.session_state.feedback_key = 0
 
 def setup_db():
     db_path = 'ev_database.db'
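With the chat UI moved into the new st_app.py module, app.py keeps only the data and database setup plus the imports needed to drive the UI (streamlit, nest_asyncio, asyncio, and launch_bot). Below is a minimal sketch of how those retained pieces are typically wired together; the actual call site sits below the hunk shown above, so treat this as an assumption rather than the file's real tail.

# Hedged sketch (assumed wiring, not shown in the hunk above): driving the async
# Streamlit UI that now lives in st_app.py from the slimmed-down app.py.
import asyncio

import nest_asyncio
from st_app import launch_bot   # async def launch_bot() defined in st_app.py

nest_asyncio.apply()   # allow asyncio.run() inside Streamlit's own event loop

if __name__ == "__main__":
    # setup_db() would typically run first to create and populate ev_database.db.
    asyncio.run(launch_bot())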
requirements.txt CHANGED
@@ -7,4 +7,4 @@ langdetect==1.0.9
 langcodes==3.4.0
 datasets==2.19.2
 uuid==1.30
-vectara-agentic==0.1.15
+vectara-agentic==0.1.16
st_app.py ADDED
@@ -0,0 +1,163 @@
+from PIL import Image
+import sys
+
+import streamlit as st
+from streamlit_pills import pills
+from streamlit_feedback import streamlit_feedback
+
+from utils import thumbs_feedback, escape_dollars_outside_latex, send_amplitude_data
+
+from vectara_agentic.agent import AgentStatusType
+from agent import initialize_agent, get_agent_config
+
+initial_prompt = "How can I help you today?"
+
+def toggle_logs():
+    st.session_state.show_logs = not st.session_state.show_logs
+
+def show_example_questions():
+    if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
+        selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
+        if selected_example:
+            st.session_state.ex_prompt = selected_example
+            st.session_state.first_turn = False
+            return True
+    return False
+
+def update_func(status_type: AgentStatusType, msg: str):
+    if status_type != AgentStatusType.AGENT_UPDATE:
+        output = f"{status_type.value} - {msg}"
+        st.session_state.log_messages.append(output)
+
+async def launch_bot():
+    def reset():
+        st.session_state.messages = [{"role": "assistant", "content": initial_prompt, "avatar": "🦖"}]
+        st.session_state.thinking_message = "Agent at work..."
+        st.session_state.log_messages = []
+        st.session_state.prompt = None
+        st.session_state.ex_prompt = None
+        st.session_state.first_turn = True
+        st.session_state.logs_enabled = False
+        st.session_state.show_logs = False
+        if 'agent' not in st.session_state:
+            st.session_state.agent = initialize_agent(cfg, update_func=update_func)
+        else:
+            st.session_state.agent.clear_memory()
+
+    if 'cfg' not in st.session_state:
+        cfg = get_agent_config()
+        st.session_state.cfg = cfg
+        st.session_state.ex_prompt = None
+        example_messages = [example.strip() for example in cfg.examples.split(";")] if cfg.examples else []
+        st.session_state.example_messages = [em for em in example_messages if len(em)>0]
+        reset()
+
+    cfg = st.session_state.cfg
+
+    # left side content
+    with st.sidebar:
+        image = Image.open('Vectara-logo.png')
+        st.image(image, width=175)
+        st.markdown(f"## {cfg['demo_welcome']}")
+        st.markdown(f"{cfg['demo_description']}")
+
+        st.markdown("\n\n")
+        bc1, bc2 = st.columns([1, 1])
+        with bc1:
+            if st.button('Start Over'):
+                reset()
+                st.rerun()
+        with bc2:  # Updated button for enabling/disabling logs
+            if st.session_state.logs_enabled:
+                if st.button('Disable Logs', key='disable_logs'):
+                    st.session_state.logs_enabled = False
+                    st.rerun()
+            else:
+                if st.button('Enable Logs', key='enable_logs'):
+                    st.session_state.logs_enabled = True
+                    st.rerun()
+
+        st.divider()
+        st.markdown(
+            "## How this works?\n"
+            "This app was built with [Vectara](https://vectara.com).\n\n"
+            "It demonstrates the use of Agentic RAG functionality with Vectara"
+        )
+
+    if "messages" not in st.session_state.keys():
+        reset()
+
+    # Display chat messages
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"], avatar=message["avatar"]):
+            st.write(message["content"])
+
+    example_container = st.empty()
+    with example_container:
+        if show_example_questions():
+            example_container.empty()
+            st.session_state.first_turn = False
+            st.rerun()
+
+    # User-provided prompt
+    if st.session_state.ex_prompt:
+        prompt = st.session_state.ex_prompt
+    else:
+        prompt = st.chat_input()
+    if prompt:
+        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": '🧑‍💻'})
+        st.session_state.prompt = prompt  # Save the prompt in session state
+        st.session_state.log_messages = []
+        st.session_state.show_logs = False
+        with st.chat_message("user", avatar='🧑‍💻'):
+            print(f"Starting new question: {prompt}\n")
+            st.write(prompt)
+        st.session_state.ex_prompt = None
+
+    # Generate a new response if last message is not from assistant
+    if st.session_state.prompt:
+        with st.chat_message("assistant", avatar='🤖'):
+            with st.spinner(st.session_state.thinking_message):
+                res = st.session_state.agent.chat(st.session_state.prompt)
+                res = escape_dollars_outside_latex(res)
+            message = {"role": "assistant", "content": res, "avatar": '🤖'}
+            st.session_state.messages.append(message)
+            st.markdown(res)
+
+        send_amplitude_data(
+            user_query=st.session_state.messages[-2]["content"],
+            bot_response=st.session_state.messages[-1]["content"],
+            demo_name=cfg['demo_name']
+        )
+
+        st.session_state.ex_prompt = None
+        st.session_state.prompt = None
+        st.session_state.first_turn = False
+        st.rerun()
+
+    # Record user feedback
+    if (st.session_state.messages[-1]["role"] == "assistant") & (st.session_state.messages[-1]["content"] != initial_prompt):
+        if st.session_state.show_logs and st.session_state.logs_enabled:  # Only show logs if enabled
+            streamlit_feedback(
+                feedback_type="thumbs", on_submit=thumbs_feedback, key=st.session_state.feedback_key,
+                kwargs={"user_query": st.session_state.messages[-2]["content"],
+                        "bot_response": st.session_state.messages[-1]["content"],
+                        "demo_name": cfg["demo_name"]}
+            )
+
+    log_placeholder = st.empty()
+    with log_placeholder.container():
+        if st.session_state.logs_enabled:  # Show logs button only if log toggle is enabled
+            if st.session_state.show_logs:
+                st.button("Hide Logs", on_click=toggle_logs)
+                for msg in st.session_state.log_messages:
+                    if len(msg) > 100:  # Use text_area for longer messages
+                        st.text_area(label="Log", value=msg, height=100, disabled=True)
+                    else:
+                        st.text(msg)
+            else:
+                if len(st.session_state.log_messages) > 0:
+                    st.button("Show Logs", on_click=toggle_logs)
+
+
+    sys.stdout.flush()
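st_app.py hands utils.thumbs_feedback to streamlit_feedback as the on_submit callback, along with kwargs for the user query, bot response, and demo name. utils.py is not part of this commit, so the stand-in below only illustrates the callback shape the streamlit-feedback component expects (the feedback response dict first, then the supplied kwargs); it is not the real implementation.

# Hypothetical stand-in for utils.thumbs_feedback (the real helper is not in this diff).
# streamlit-feedback invokes on_submit with the feedback response dict, followed by the
# kwargs passed to streamlit_feedback() above.
from typing import Any, Dict

def thumbs_feedback(feedback: Dict[str, Any], **kwargs: Any) -> None:
    # For feedback_type="thumbs" the response dict carries roughly
    # {"type": "thumbs", "score": <thumbs emoji>, "text": None}.
    print(
        f"demo={kwargs.get('demo_name')} "
        f"score={feedback.get('score')} "
        f"query={kwargs.get('user_query')!r}"
    )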