Spaces:
Running
Running
github-actions
committed on
Commit
•
ba905bd
1
Parent(s):
a70549c
Sync updates from source repository
Browse files
app.py
CHANGED
@@ -10,6 +10,8 @@ from streamlit_feedback import streamlit_feedback
|
|
10 |
|
11 |
from utils import thumbs_feedback, send_amplitude_data, escape_dollars_outside_latex
|
12 |
|
|
|
|
|
13 |
|
14 |
max_examples = 6
|
15 |
languages = {'English': 'eng', 'Spanish': 'spa', 'French': 'fra', 'Chinese': 'zho', 'German': 'deu', 'Hindi': 'hin', 'Arabic': 'ara',
|
@@ -32,7 +34,7 @@ def isTrue(x) -> bool:
|
|
32 |
|
33 |
def launch_bot():
|
34 |
def reset():
|
35 |
-
st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
|
36 |
st.session_state.ex_prompt = None
|
37 |
st.session_state.first_turn = True
|
38 |
|
@@ -84,7 +86,7 @@ def launch_bot():
|
|
84 |
image = Image.open('Vectara-logo.png')
|
85 |
st.image(image, width=175)
|
86 |
st.markdown(f"## About\n\n"
|
87 |
-
f"This demo uses
|
88 |
|
89 |
cfg.language = st.selectbox('Language:', languages.keys())
|
90 |
if st.session_state.language != cfg.language:
|
@@ -103,11 +105,8 @@ def launch_bot():
|
|
103 |
st.markdown(
|
104 |
"## How this works?\n"
|
105 |
"This app was built with [Vectara](https://vectara.com).\n"
|
106 |
-
"Vectara's [Indexing API](https://docs.vectara.com/docs/api-reference/indexing-apis/indexing) was used to ingest the data into a Vectara corpus (or index).\n\n"
|
107 |
"This app uses Vectara [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and present the results to you, answering your question.\n\n"
|
108 |
-
)
|
109 |
-
st.markdown("---")
|
110 |
-
|
111 |
|
112 |
st.markdown(f"<center> <h2> Vectara AI Assistant: {cfg.title} </h2> </center>", unsafe_allow_html=True)
|
113 |
|
@@ -116,7 +115,7 @@ def launch_bot():
|
|
116 |
|
117 |
# Display chat messages
|
118 |
for message in st.session_state.messages:
|
119 |
-
with st.chat_message(message["role"]):
|
120 |
st.write(message["content"])
|
121 |
|
122 |
example_container = st.empty()
|
@@ -131,14 +130,14 @@ def launch_bot():
|
|
131 |
else:
|
132 |
prompt = st.chat_input()
|
133 |
if prompt:
|
134 |
-
st.session_state.messages.append({"role": "user", "content": prompt})
|
135 |
-
with st.chat_message("user"):
|
136 |
st.write(prompt)
|
137 |
st.session_state.ex_prompt = None
|
138 |
|
139 |
# Generate a new response if last message is not from assistant
|
140 |
if st.session_state.messages[-1]["role"] != "assistant":
|
141 |
-
with st.chat_message("assistant"):
|
142 |
if cfg.streaming:
|
143 |
stream = generate_streaming_response(prompt)
|
144 |
response = st.write_stream(stream)
|
@@ -148,7 +147,7 @@ def launch_bot():
|
|
148 |
st.write(response)
|
149 |
|
150 |
response = escape_dollars_outside_latex(response)
|
151 |
-
message = {"role": "assistant", "content": response}
|
152 |
st.session_state.messages.append(message)
|
153 |
|
154 |
# Send query and response to Amplitude Analytics
|
|
|
10 |
|
11 |
from utils import thumbs_feedback, send_amplitude_data, escape_dollars_outside_latex
|
12 |
|
13 |
+
from dotenv import load_dotenv
|
14 |
+
load_dotenv(override=True)
|
15 |
|
16 |
max_examples = 6
|
17 |
languages = {'English': 'eng', 'Spanish': 'spa', 'French': 'fra', 'Chinese': 'zho', 'German': 'deu', 'Hindi': 'hin', 'Arabic': 'ara',
|
|
|
34 |
|
35 |
def launch_bot():
|
36 |
def reset():
|
37 |
+
st.session_state.messages = [{"role": "assistant", "content": "How may I help you?", "avatar": '🤖'}]
|
38 |
st.session_state.ex_prompt = None
|
39 |
st.session_state.first_turn = True
|
40 |
|
|
|
86 |
image = Image.open('Vectara-logo.png')
|
87 |
st.image(image, width=175)
|
88 |
st.markdown(f"## About\n\n"
|
89 |
+
f"This demo uses Vectara RAG to ask questions about {cfg.source_data_desc}\n")
|
90 |
|
91 |
cfg.language = st.selectbox('Language:', languages.keys())
|
92 |
if st.session_state.language != cfg.language:
|
|
|
105 |
st.markdown(
|
106 |
"## How this works?\n"
|
107 |
"This app was built with [Vectara](https://vectara.com).\n"
|
|
|
108 |
"This app uses Vectara [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and present the results to you, answering your question.\n\n"
|
109 |
+
)
|
|
|
|
|
110 |
|
111 |
st.markdown(f"<center> <h2> Vectara AI Assistant: {cfg.title} </h2> </center>", unsafe_allow_html=True)
|
112 |
|
|
|
115 |
|
116 |
# Display chat messages
|
117 |
for message in st.session_state.messages:
|
118 |
+
with st.chat_message(message["role"], avatar=message["avatar"]):
|
119 |
st.write(message["content"])
|
120 |
|
121 |
example_container = st.empty()
|
|
|
130 |
else:
|
131 |
prompt = st.chat_input()
|
132 |
if prompt:
|
133 |
+
st.session_state.messages.append({"role": "user", "content": prompt, "avatar": '🧑‍💻'})
|
134 |
+
with st.chat_message("user", avatar="🧑‍💻"):
|
135 |
st.write(prompt)
|
136 |
st.session_state.ex_prompt = None
|
137 |
|
138 |
# Generate a new response if last message is not from assistant
|
139 |
if st.session_state.messages[-1]["role"] != "assistant":
|
140 |
+
with st.chat_message("assistant", avatar="🤖"):
|
141 |
if cfg.streaming:
|
142 |
stream = generate_streaming_response(prompt)
|
143 |
response = st.write_stream(stream)
|
|
|
147 |
st.write(response)
|
148 |
|
149 |
response = escape_dollars_outside_latex(response)
|
150 |
+
message = {"role": "assistant", "content": response, "avatar": '🤖'}
|
151 |
st.session_state.messages.append(message)
|
152 |
|
153 |
# Send query and response to Amplitude Analytics
|
query.py
CHANGED
@@ -32,20 +32,30 @@ class VectaraQuery():
|
|
32 |
},
|
33 |
'reranker':
|
34 |
{
|
35 |
-
|
36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
},
|
38 |
},
|
39 |
'generation':
|
40 |
{
|
41 |
-
'
|
42 |
-
'max_used_search_results':
|
43 |
'response_language': response_lang,
|
44 |
'citations':
|
45 |
{
|
46 |
-
'style': '
|
|
|
47 |
},
|
48 |
-
'enable_factual_consistency_score':
|
49 |
},
|
50 |
'chat':
|
51 |
{
|
@@ -79,7 +89,6 @@ class VectaraQuery():
|
|
79 |
endpoint = "https://api.vectara.io/v2/chats"
|
80 |
|
81 |
body = self.get_body(query_str, language, stream=False)
|
82 |
-
|
83 |
response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers())
|
84 |
|
85 |
if response.status_code != 200:
|
@@ -125,5 +134,7 @@ class VectaraQuery():
|
|
125 |
chunk = line['generation_chunk']
|
126 |
chunks.append(chunk)
|
127 |
yield chunk
|
|
|
|
|
128 |
|
129 |
return ''.join(chunks)
|
|
|
32 |
},
|
33 |
'reranker':
|
34 |
{
|
35 |
+
"type": "chain",
|
36 |
+
"rerankers": [
|
37 |
+
{
|
38 |
+
"type": "customer_reranker",
|
39 |
+
"reranker_name": "Rerank_Multilingual_v1"
|
40 |
+
},
|
41 |
+
{
|
42 |
+
"type": "mmr",
|
43 |
+
"diversity_bias": 0.05
|
44 |
+
}
|
45 |
+
]
|
46 |
},
|
47 |
},
|
48 |
'generation':
|
49 |
{
|
50 |
+
'generation_preset_name': self.prompt_name,
|
51 |
+
'max_used_search_results': 7,
|
52 |
'response_language': response_lang,
|
53 |
'citations':
|
54 |
{
|
55 |
+
'style': 'markdown',
|
56 |
+
'url_pattern': '{doc.url}'
|
57 |
},
|
58 |
+
'enable_factual_consistency_score': True
|
59 |
},
|
60 |
'chat':
|
61 |
{
|
|
|
89 |
endpoint = "https://api.vectara.io/v2/chats"
|
90 |
|
91 |
body = self.get_body(query_str, language, stream=False)
|
|
|
92 |
response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers())
|
93 |
|
94 |
if response.status_code != 200:
|
|
|
134 |
chunk = line['generation_chunk']
|
135 |
chunks.append(chunk)
|
136 |
yield chunk
|
137 |
+
elif line['type'] == 'chat_info':
|
138 |
+
self.conv_id = line['chat_id']
|
139 |
|
140 |
return ''.join(chunks)
|
utils.py
CHANGED
@@ -25,9 +25,11 @@ def thumbs_feedback(feedback, **kwargs):
|
|
25 |
st.session_state.feedback_key += 1
|
26 |
|
27 |
def send_amplitude_data(user_query, chat_response, demo_name, language, feedback=None):
|
28 |
-
|
|
|
|
|
29 |
data = {
|
30 |
-
"api_key":
|
31 |
"events": [{
|
32 |
"device_id": st.session_state.device_id,
|
33 |
"event_type": "submitted_query",
|
|
|
25 |
st.session_state.feedback_key += 1
|
26 |
|
27 |
def send_amplitude_data(user_query, chat_response, demo_name, language, feedback=None):
|
28 |
+
amplitude_api_key = os.getenv('AMPLITUDE_TOKEN')
|
29 |
+
if not amplitude_api_key:
|
30 |
+
return
|
31 |
data = {
|
32 |
+
"api_key": amplitude_api_key,
|
33 |
"events": [{
|
34 |
"device_id": st.session_state.device_id,
|
35 |
"event_type": "submitted_query",
|