ofermend committed
Commit 8d7a085
1 Parent(s): 92a65c1

Update query.py

Files changed (1)
  1. query.py +121 -42
query.py CHANGED
@@ -8,6 +8,47 @@ def extract_between_tags(text, start_tag, end_tag):
     end_index = text.find(end_tag, start_index)
     return text[start_index+len(start_tag):end_index-len(end_tag)]

+class CitationNormalizer():
+
+    def __init__(self, responses, docs):
+        self.docs = docs
+        self.responses = responses
+        self.refs = []
+
+    def normalize_citations(self, summary):
+        start_tag = "%START_SNIPPET%"
+        end_tag = "%END_SNIPPET%"
+
+        # find all references in the summary
+        pattern = r'\[\d{1,2}\]'
+        matches = [match.span() for match in re.finditer(pattern, summary)]
+
+        # figure out unique list of references
+        for match in matches:
+            start, end = match
+            response_num = int(summary[start+1:end-1])
+            doc_num = self.responses[response_num-1]['documentIndex']
+            metadata = {item['name']: item['value'] for item in self.docs[doc_num]['metadata']}
+            text = extract_between_tags(self.responses[response_num-1]['text'], start_tag, end_tag)
+            if 'url' in metadata.keys():
+                url = f"{metadata['url']}#:~:text={quote(text)}"
+                if url not in self.refs:
+                    self.refs.append(url)
+
+        # replace references with markdown links
+        refs_dict = {url:(inx+1) for inx,url in enumerate(self.refs)}
+        for match in reversed(matches):
+            start, end = match
+            response_num = int(summary[start+1:end-1])
+            doc_num = self.responses[response_num-1]['documentIndex']
+            metadata = {item['name']: item['value'] for item in self.docs[doc_num]['metadata']}
+            text = extract_between_tags(self.responses[response_num-1]['text'], start_tag, end_tag)
+            url = f"{metadata['url']}#:~:text={quote(text)}"
+            citation_inx = refs_dict[url]
+            summary = summary[:start] + f'[\[{citation_inx}\]]({url})' + summary[end:]
+
+        return summary
+
 class VectaraQuery():
     def __init__(self, api_key: str, customer_id: str, corpus_ids: list[str], prompt_name: str = None):
         self.customer_id = customer_id
@@ -16,23 +57,13 @@ class VectaraQuery():
         self.prompt_name = prompt_name if prompt_name else "vectara-summary-ext-v1.2.0"
         self.conv_id = None

-    def submit_query(self, query_str: str):
+    def get_body(self, query_str: str):
         corpora_key_list = [{
             'customer_id': self.customer_id, 'corpus_id': corpus_id, 'lexical_interpolation_config': {'lambda': 0.025}
             } for corpus_id in self.corpus_ids
         ]

-        endpoint = f"https://api.vectara.io/v1/query"
-        start_tag = "%START_SNIPPET%"
-        end_tag = "%END_SNIPPET%"
-        headers = {
-            "Content-Type": "application/json",
-            "Accept": "application/json",
-            "customer-id": self.customer_id,
-            "x-api-key": self.api_key,
-            "grpc-timeout": "60S"
-        }
-        body = {
+        return {
             'query': [
                 {
                     'query': query_str,
@@ -42,8 +73,8 @@ class VectaraQuery():
                     'context_config': {
                         'sentences_before': 2,
                         'sentences_after': 2,
-                        'start_tag': start_tag,
-                        'end_tag': end_tag,
+                        'start_tag': "%START_SNIPPET%",
+                        'end_tag': "%END_SNIPPET%",
                     },
                     'rerankingConfig':
                     {
@@ -61,14 +92,27 @@ class VectaraQuery():
                                 'store': True,
                                 'conversationId': self.conv_id
                             },
-                            # 'debug': True,
                         }
                     ]
                 }
             ]
         }
+
+    def get_headers(self):
+        return {
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+            "customer-id": self.customer_id,
+            "x-api-key": self.api_key,
+            "grpc-timeout": "60S"
+        }
+
+    def submit_query(self, query_str: str):
+
+        endpoint = f"https://api.vectara.io/v1/query"
+        body = self.get_body(query_str)

-        response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=headers)
+        response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers())
         if response.status_code != 200:
             print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
             return "Sorry, something went wrong in my brain. Please try again later."
@@ -90,33 +134,68 @@ class VectaraQuery():
             return 'Sorry, something went wrong in my brain. Please try again later.'

         self.conv_id = res['responseSet'][0]['summary'][0]['chat']['conversationId']
+
+        summary = CitationNormalizer(responses, docs).normalize_citations(summary)
+        return summary
+
+    def submit_query_streaming(self, query_str: str):
+
+        endpoint = f"https://api.vectara.io/v1/stream-query"
+        body = self.get_body(query_str)

-        pattern = r'\[\d{1,2}\]'
-        matches = [match.span() for match in re.finditer(pattern, summary)]
+        response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers(), stream=True)
+        if response.status_code != 200:
+            print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
+            return "Sorry, something went wrong in my brain. Please try again later."

-        # figure out unique list of references
-        refs = []
-        for match in matches:
-            start, end = match
-            response_num = int(summary[start+1:end-1])
-            doc_num = responses[response_num-1]['documentIndex']
-            metadata = {item['name']: item['value'] for item in docs[doc_num]['metadata']}
-            text = extract_between_tags(responses[response_num-1]['text'], start_tag, end_tag)
-            if 'url' in metadata.keys():
-                url = f"{metadata['url']}#:~:text={quote(text)}"
-                if url not in refs:
-                    refs.append(url)
+        chunks = []
+        accumulated_text = ""  # Initialize text accumulation
+        pattern_max_length = 50  # Example heuristic
+        for line in response.iter_lines():
+            if line:  # filter out keep-alive new lines
+                data = json.loads(line.decode('utf-8'))
+                res = data['result']

-        # replace references with markdown links
-        refs_dict = {url:(inx+1) for inx,url in enumerate(refs)}
-        for match in reversed(matches):
-            start, end = match
-            response_num = int(summary[start+1:end-1])
-            doc_num = responses[response_num-1]['documentIndex']
-            metadata = {item['name']: item['value'] for item in docs[doc_num]['metadata']}
-            text = extract_between_tags(responses[response_num-1]['text'], start_tag, end_tag)
-            url = f"{metadata['url']}#:~:text={quote(text)}"
-            citation_inx = refs_dict[url]
-            summary = summary[:start] + f'[\[{citation_inx}\]]({url})' + summary[end:]
+                # capture responses and docs if we get that first
+                response_set = res['responseSet']
+                if response_set is not None:
+                    # do we have chat conv_id to update?
+                    summary = response_set.get('summary', [])
+                    if len(summary) > 0:
+                        chat = summary[0].get('chat', None)
+                        if chat and chat.get('status', None):
+                            st_code = chat['status']
+                            print(f"Chat query failed with code {st_code}")
+                            if st_code == 'RESOURCE_EXHAUSTED':
+                                self.conv_id = None
+                                return 'Sorry, Vectara chat turns exceeds plan limit.'
+                            return 'Sorry, something went wrong in my brain. Please try again later.'
+                        conv_id = chat.get('conversationId', None) if chat else None
+                        if conv_id:
+                            self.conv_id = conv_id

-        return summary
+                else:
+                    # grab next chunk and yield it as output
+                    summary = res.get('summary', None)
+                    if summary is None:
+                        continue
+                    chunk = data['result']['summary']['text']
+                    accumulated_text += chunk  # Append current chunk to accumulation
+                    if len(accumulated_text) > pattern_max_length:
+                        accumulated_text = re.sub(r"\[\d+\]", "", accumulated_text)
+                        accumulated_text = re.sub(r"\s+\.", ".", accumulated_text)
+                        out_chunk = accumulated_text[:-pattern_max_length]
+                        chunks.append(out_chunk)
+                        yield out_chunk
+                        accumulated_text = accumulated_text[-pattern_max_length:]
+
+                    if summary['done']:
+                        break
+
+        # yield the last piece
+        if len(accumulated_text) > 0:
+            accumulated_text = re.sub(r" \[\d+\]\.", ".", accumulated_text)
+            chunks.append(accumulated_text)
+            yield accumulated_text
+
+        return ''.join(chunks)
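For orientation, a minimal sketch of how the refactored class might be driven from calling code. This driver is not part of the commit; it assumes Vectara credentials in environment variables and uses a placeholder corpus ID and query strings.

# Hypothetical driver for the refactored VectaraQuery class (not part of this commit).
# Assumes Vectara credentials in environment variables and a placeholder corpus ID.
import os
from query import VectaraQuery

vq = VectaraQuery(
    api_key=os.environ["VECTARA_API_KEY"],
    customer_id=os.environ["VECTARA_CUSTOMER_ID"],
    corpus_ids=["1"],
)

# Non-streaming path: submit_query() posts to /v1/query and returns the full
# summary with citations rewritten as markdown links by CitationNormalizer.
print(vq.submit_query("What does the documentation say about API limits?"))

# Streaming path: submit_query_streaming() is a generator over /v1/stream-query,
# so partial summary text can be rendered as it arrives.
for chunk in vq.submit_query_streaming("Summarize the main topics."):
    print(chunk, end="", flush=True)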