david-oplatka committed
Commit • b3159ec
1 Parent(s): 156af69
Update query.py

query.py CHANGED
@@ -3,52 +3,53 @@ import json


 class VectaraQuery():
-    def __init__(self, api_key: str,
-        self.corpus_ids = corpus_ids
         self.api_key = api_key
         self.prompt_name = prompt_name if prompt_name else "vectara-experimental-summary-ext-2023-12-11-sml"
         self.conv_id = None

         ]

         return {
-            'query':
         }


@@ -56,76 +57,73 @@
         return {
             "Content-Type": "application/json",
             "Accept": "application/json",
             "x-api-key": self.api_key,
             "grpc-timeout": "60S"
         }

     def submit_query(self, query_str: str):

         if response.status_code != 200:
             print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
             return "Sorry, something went wrong in my brain. Please try again later."

         res = response.json()

-        self.conv_id = chat['conversationId'] if chat else None
         return summary

     def submit_query_streaming(self, query_str: str):

-        response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.
         if response.status_code != 200:
             print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
             return "Sorry, something went wrong in my brain. Please try again later."

         chunks = []
         for line in response.iter_lines():
             if line: # filter out keep-alive new lines
-                if summary is None or len(summary)==0:
-                    continue
-                else:
-                    chat = summary.get('chat', None)
-                    if chat and chat.get('status', None):
-                        st_code = chat['status']
-                        print(f"Chat query failed with code {st_code}")
-                        if st_code == 'RESOURCE_EXHAUSTED':
-                            self.conv_id = None
-                            return 'Sorry, Vectara chat turns exceeds plan limit.'
-                        return 'Sorry, something went wrong in my brain. Please try again later.'
-                    conv_id = chat.get('conversationId', None) if chat else None
-                    if conv_id:
-                        self.conv_id = conv_id
-
-                    chunk = summary['text']
-                    chunks.append(chunk)
-                    yield chunk
-
-                    if summary['done']:
-                        break

         return ''.join(chunks)

 class VectaraQuery():
+    def __init__(self, api_key: str, corpus_keys: list[str], prompt_name: str = None):
+        self.corpus_keys = corpus_keys
         self.api_key = api_key
         self.prompt_name = prompt_name if prompt_name else "vectara-experimental-summary-ext-2023-12-11-sml"
         self.conv_id = None

+
+    def get_body(self, query_str: str, stream: False):
+        corpora_list = [{
+            'corpus_key': corpus_key, 'lexical_interpolation': 0.005
+        } for corpus_key in self.corpus_keys
         ]

         return {
+            'query': query_str,
+            'search':
+            {
+                'corpora': corpora_list,
+                'offset': 0,
+                'limit': 50,
+                'context_configuration':
+                {
+                    'sentences_before': 2,
+                    'sentences_after': 2,
+                    'start_tag': "%START_SNIPPET%",
+                    'end_tag': "%END_SNIPPET%",
+                },
+                'reranker':
+                {
+                    'type': 'mmr'
+                },
+            },
+            'generation':
+            {
+                'prompt_name': self.prompt_name,
+                'max_used_search_results': 10,
+                'response_language': 'eng',
+                'citations':
+                {
+                    'style': 'none'
+                }
+            },
+            'chat':
+            {
+                'store': True
+            },
+            'stream_response': stream
         }


         return {
             "Content-Type": "application/json",
             "Accept": "application/json",
+            "x-api-key": self.api_key,
+            "grpc-timeout": "60S"
+        }
+
+    def get_stream_headers(self):
+        return {
+            "Content-Type": "application/json",
+            "Accept": "text/event-stream",
             "x-api-key": self.api_key,
             "grpc-timeout": "60S"
         }

     def submit_query(self, query_str: str):

+        if self.conv_id:
+            endpoint = f"https://api.vectara.io/v2/chats/{self.conv_id}/turns"
+        else:
+            endpoint = "https://api.vectara.io/v2/chats"

+        body = self.get_body(query_str, stream=False)
+
+        response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers())
         if response.status_code != 200:
             print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
             return "Sorry, something went wrong in my brain. Please try again later."

         res = response.json()

+        if self.conv_id is None:
+            self.conv_id = res['chat_id']
+
+        summary = res['answer']

+        # FIGURE OUT HOW TO IMPLEMENT THIS IN APIV2
+        # if chat and chat['status'] is not None:
+        #     st_code = chat['status']
+        #     print(f"Chat query failed with code {st_code}")
+        #     if st_code == 'RESOURCE_EXHAUSTED':
+        #         self.conv_id = None
+        #         return 'Sorry, Vectara chat turns exceeds plan limit.'
+        #     return 'Sorry, something went wrong in my brain. Please try again later.'

         return summary

     def submit_query_streaming(self, query_str: str):

+        if self.conv_id:
+            endpoint = f"https://api.vectara.io/v2/chats/{self.conv_id}/turns"
+        else:
+            endpoint = "https://api.vectara.io/v2/chats"
+
+        body = self.get_body(query_str, stream=True)

+        response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_stream_headers(), stream=True)
+
         if response.status_code != 200:
             print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
             return "Sorry, something went wrong in my brain. Please try again later."

         chunks = []
         for line in response.iter_lines():
+            line = line.decode('utf-8')
             if line: # filter out keep-alive new lines
+                key, value = line.split(':', 1)
+                if key == 'data':
+                    line = json.loads(value)
+                    if line['type'] == 'generation_chunk':
+                        chunks.append(line['generation_chunk'])
+
         return ''.join(chunks)
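
For orientation, a minimal usage sketch of the class after this change, assuming it is imported from query.py and that submit_query_streaming still yields generation chunks the way the previous version yielded text chunks; the API key and corpus key values below are hypothetical placeholders.

    from query import VectaraQuery

    # Hypothetical credentials; substitute a real Vectara API key and corpus key.
    vq = VectaraQuery(api_key="zut_example_api_key", corpus_keys=["my-docs-corpus"])

    # Non-streaming: submit_query returns the generated answer as a single string
    # and stores the chat id for follow-up turns.
    answer = vq.submit_query("What does this Space demonstrate?")
    print(answer)

    # Streaming: assuming the method yields generation chunks (as the pre-change
    # version did), print them as they arrive.
    for chunk in vq.submit_query_streaming("Tell me more about the reranker."):
        print(chunk, end="", flush=True)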