# Vectara AI Assistant — Gradio chat demo (Hugging Face Space).
import os

import gradio as gr
from omegaconf import OmegaConf

from query import VectaraQuery
def isTrue(x) -> bool:
    """Interpret *x* as a boolean flag.

    Accepts either an actual bool (returned unchanged) or a string such as
    the raw value of an environment variable; only the case-insensitive,
    whitespace-stripped string 'true' counts as True.
    """
    if isinstance(x, bool):
        return x
    # str() makes this total over any input (e.g. None, int) instead of
    # raising AttributeError on .strip(); behavior for str input is unchanged.
    return str(x).strip().lower() == 'true'
# --- Configuration from environment variables ---------------------------
# Required: corpus_keys (comma-separated), api_key, title, source_data_desc.
# Optional: streaming, prompt_name, examples.
corpus_keys = str(os.environ['corpus_keys']).split(',')

cfg = OmegaConf.create({
    'corpus_keys': corpus_keys,
    'api_key': str(os.environ['api_key']),
    'title': os.environ['title'],
    'source_data_desc': os.environ['source_data_desc'],
    # Defaults to False when the 'streaming' env var is absent.
    'streaming': isTrue(os.environ.get('streaming', False)),
    'prompt_name': os.environ.get('prompt_name', None),
    # Optional comma-separated example questions shown in the UI.
    'examples': os.environ.get('examples', None)
})

# Query client used for all requests against the configured Vectara corpora.
vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)
def respond(message, history):
    """Yield the assistant's reply for *message*.

    With streaming enabled, each chunk from the Vectara streaming API is
    yielded as it arrives; otherwise the complete answer is yielded once.
    *history* is accepted to satisfy the chat-callback signature but is
    not used here.
    """
    if not cfg.streaming:
        # Single-shot query: emit the whole answer in one yield.
        yield vq.submit_query(message)
        return
    # Streaming query: forward each chunk as it arrives.
    for chunk in vq.submit_query_streaming(message):
        yield chunk
def vote(data: gr.LikeData):
    """Log thumbs-up / thumbs-down feedback from the chatbot's like button."""
    print("Received Thumbs up" if data.liked else "Received Thumbs down")
# HTML header for the app: assistant title on the left, Vectara logo on the
# right, plus a one-line description of the source data. Rendered via gr.HTML.
heading_html = f'''
<table>
<tr>
<td style="width: 80%; text-align: left; vertical-align: middle;">
<h1>Vectara AI Assistant: {cfg.title}</h1>
</td>
<td style="width: 20%; text-align: right; vertical-align: middle;">
<img src="https://github.com/david-oplatka/chatbot-streamlit/blob/main/Vectara-logo.png?raw=true">
</td>
</tr>
<tr>
<td colspan="2" style="font-size: 16px;">This demo uses Retrieval Augmented Generation from <a href="https://vectara.com/">Vectara</a> to ask questions about {cfg.source_data_desc}.</td>
</tr>
</table>
'''
# CSS applied to the Blocks layout: borderless full-width header table,
# scaled logo image, and heading font size.
bot_css = """
table { border: none; width: 100%; table-layout: fixed; border-collapse: separate;}
td { vertical-align: middle; border: none;}
img { width: 75%;}
h1 { font-size: 2em; /* Adjust the size as needed */}
"""
# Parse the optional comma-separated example questions into a list of
# stripped strings; None disables the examples section entirely.
if not cfg.examples:
    app_examples = None
else:
    app_examples = [question.strip() for question in cfg.examples.split(",")]
# --- Gradio UI ----------------------------------------------------------
with gr.Blocks(css=bot_css) as demo:
    gr.HTML(heading_html)
    # Seed the chat with a greeting (bot-only turn: [user, bot] pairs).
    chatbot = gr.Chatbot(value=[[None, "How may I help you?"]])
    msg = gr.Textbox(label="Message")
    clear = gr.Button("Clear")

    def user(message, history):
        # Append the user's turn (bot reply pending) and clear the textbox.
        return "", history + [[message, None]]

    def bot(history):
        # Generate the bot reply for the most recent user message.
        message = history[-1][0]
        bot_message = respond(message, history)
        if cfg.streaming:
            # Accumulate streamed chunks, re-yielding the growing history so
            # the chat window updates incrementally.
            full_response = ""
            for chunk in bot_message:
                full_response += chunk
                history[-1][1] = full_response
                yield history
        else:
            # Non-streaming: respond() yields exactly one complete answer.
            history[-1][1] = next(bot_message)
            yield history

    # Submit flow: record the user's turn first, then produce the bot reply.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot, api_name="bot_response"
    )
    chatbot.like(vote, None, None)
    clear.click(lambda: None, None, chatbot, queue=False)

    if app_examples:
        # NOTE(review): fn=user takes (message, history) but inputs is only
        # msg, and outputs is a single component for user's two return values.
        # With cache_examples=False the fn is not executed on click, so this
        # mismatch appears dormant — confirm before enabling caching.
        gr.Examples(
            app_examples,
            inputs=msg,
            outputs=chatbot,
            fn=user,
            cache_examples=False
        )

if __name__ == "__main__":
    demo.launch()
# NOTE: A previous implementation based on gr.ChatInterface was removed from
# here; recover it from version control history if needed.