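# Minimal client for Vectara's v1 /query REST API with chat (conversational)
# summarization: submit_query() returns the generated answer with its numeric
# citations rewritten as markdown links that deep-link into the source pages.
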
import requests
import json
import re
from urllib.parse import quote
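

# Return the text between start_tag and end_tag, excluding the tags themselves.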
def extract_between_tags(text, start_tag, end_tag):
    start_index = text.find(start_tag)
    end_index = text.find(end_tag, start_index)
    # The slice ends at end_index, where end_tag begins; subtracting
    # len(end_tag) here would truncate the extracted snippet.
    return text[start_index + len(start_tag):end_index]
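

# Thin client for Vectara's query API. conv_id carries the server-side chat
# thread between calls, so follow-up queries keep their conversation context.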
class VectaraQuery():
    def __init__(self, api_key: str, customer_id: int, corpus_ids: list):
        self.customer_id = customer_id
        self.corpus_ids = corpus_ids
        self.api_key = api_key
        self.conv_id = None
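
    # Run query_str against the configured corpora and return the chat summary
    # with its inline [n] citations rewritten as markdown links.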
    def submit_query(self, query_str: str):
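        # lexical_interpolation_config's lambda blends exact keyword matching
        # with neural retrieval in Vectara's hybrid search; a small value like
        # 0.025 leans almost entirely on the neural side.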
        corpora_key_list = [{
            'customer_id': str(self.customer_id),
            'corpus_id': str(corpus_id),
            'lexical_interpolation_config': {'lambda': 0.025}
        } for corpus_id in self.corpus_ids]
        endpoint = "https://api.vectara.io/v1/query"
        start_tag = "%START_SNIPPET%"
        end_tag = "%END_SNIPPET%"
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "customer-id": str(self.customer_id),
            "x-api-key": self.api_key,
            "grpc-timeout": "60S"
        }
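
        # Request body: fetch the top 7 matches across all corpora with two
        # sentences of context around each snippet, and summarize the top 5
        # into a chat answer (conversationId threads the ongoing conversation).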
        body = {
            'query': [
                {
                    'query': query_str,
                    'start': 0,
                    'numResults': 7,
                    'corpusKey': corpora_key_list,
                    'context_config': {
                        'sentences_before': 2,
                        'sentences_after': 2,
                        'start_tag': start_tag,
                        'end_tag': end_tag,
                    },
                    'summary': [
                        {
                            'responseLang': 'eng',
                            'maxSummarizedResults': 5,
                            'chat': {
                                'store': True,
                                'conversationId': self.conv_id
                            }
                        }
                    ]
                }
            ]
        }
        response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=headers)
        if response.status_code != 200:
            print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
            return "Sorry, something went wrong in my brain. Please try again later."
        res = response.json()
        summary = res['responseSet'][0]['summary'][0]['text']
        responses = res['responseSet'][0]['response']
        docs = res['responseSet'][0]['document']
        # summary is already a plain string here, so the conversation id must
        # come from the summary object in the raw response, not from summary.
        self.conv_id = res['responseSet'][0]['summary'][0]['chat']['conversationId']
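
        # The summary cites snippets as [1], [2], ...; record each citation's span.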
        pattern = r'\[\d{1,2}\]'
        matches = [match.span() for match in re.finditer(pattern, summary)]

        # figure out unique list of references
        refs = []
        for match in matches:
            start, end = match
            response_num = int(summary[start + 1:end - 1])
            doc_num = responses[response_num - 1]['documentIndex']
            metadata = {item['name']: item['value'] for item in docs[doc_num]['metadata']}
            text = extract_between_tags(responses[response_num - 1]['text'], start_tag, end_tag)
            url = f"{metadata['url']}#:~:text={quote(text)}"
            if url not in refs:
                refs.append(url)

        # replace references with markdown links
        refs_dict = {url: (inx + 1) for inx, url in enumerate(refs)}
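        # Walk the matches in reverse so earlier spans stay valid while the
        # summary string grows with each inserted link.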
        for match in reversed(matches):
            start, end = match
            response_num = int(summary[start + 1:end - 1])
            doc_num = responses[response_num - 1]['documentIndex']
            metadata = {item['name']: item['value'] for item in docs[doc_num]['metadata']}
            text = extract_between_tags(responses[response_num - 1]['text'], start_tag, end_tag)
            url = f"{metadata['url']}#:~:text={quote(text)}"
            citation_inx = refs_dict[url]
            summary = summary[:start] + f'[\\[{citation_inx}\\]]({url})' + summary[end:]
        return summary
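

# A minimal usage sketch. The customer id, corpus id, and API key below are
# placeholders for illustration, not real credentials.
if __name__ == "__main__":
    vq = VectaraQuery(api_key="YOUR_API_KEY", customer_id=1234567890, corpus_ids=[1])
    print(vq.submit_query("What topics does this corpus cover?"))
    # conv_id was stored from the first response, so this follow-up continues
    # the same chat thread server-side.
    print(vq.submit_query("Tell me more about the first topic."))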