import os
import uuid
import json

import gradio as gr

from openai import AzureOpenAI

from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma

from huggingface_hub import CommitScheduler
from pathlib import Path

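# Azure OpenAI client used to generate answers to user queries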
client = AzureOpenAI(
    api_key=os.environ["AZURE_OPENAI_KEY"],
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    api_version="2024-02-01"
)

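# Embedding model used to encode queries (should match the model used to build the persisted collection)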
embedding_model = HuggingFaceEmbeddings(model_name='thenlper/gte-small')

tesla_10k_collection = 'tesla-10k-2019-to-2023'

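# Load the persisted Chroma vector store containing the Tesla 10-K chunks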
vectorstore_persisted = Chroma(
    collection_name=tesla_10k_collection,
    persist_directory='./tesla_db',
    embedding_function=embedding_model
)

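# Retriever that returns the 5 most similar chunks for a query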
retriever = vectorstore_persisted.as_retriever(
    search_type='similarity',
    search_kwargs={'k': 5}
)

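# Each app instance writes its logs to a unique JSON Lines file under logs/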
log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
log_folder = log_file.parent

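# CommitScheduler pushes the contents of the logs folder to a Hugging Face dataset repo every 2 minutes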
scheduler = CommitScheduler(
    repo_id="document-qna-chroma-anyscale-logs",
    repo_type="dataset",
    folder_path=log_folder,
    path_in_repo="data",
    every=2
)

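# System prompt: answer only from the retrieved context, otherwise say "I don't know"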
qna_system_message = """
You are an assistant to a financial services firm. You answer user queries about annual reports.
Users will ask questions delimited by triple backticks, that is, ```.
User input will contain the context required for you to answer the question.
This context will begin with the token: ###Context.
The context contains references to specific portions of a document relevant to the user query.
Answer only using the context provided in the input, but do not mention anything about the context in your answer.
If the answer is not found in the context, respond "I don't know".
"""

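# User message template: retrieved context followed by the question delimited by triple backticks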
qna_user_message_template = """
###Context
Here are some documents that are relevant to the question.
{context}
```
{question}
```
"""

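# RAG pipeline: retrieve relevant chunks, compose the prompt, query the LLM, log the exchange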
def predict(user_input):

    relevant_document_chunks = retriever.invoke(user_input)
    context_list = [d.page_content for d in relevant_document_chunks]
    context_for_query = ".".join(context_list)

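    # Compose the chat messages: system instructions plus the user template filled with context and question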
    prompt = [
        {'role': 'system', 'content': qna_system_message},
        {'role': 'user', 'content': qna_user_message_template.format(
            context=context_for_query,
            question=user_input
        )}
    ]

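    # Call the chat model; if the request fails, return the error text as the response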
    try:
        response = client.chat.completions.create(
            model='gpt-4o-mini',
            messages=prompt,
            temperature=0
        )
        prediction = response.choices[0].message.content
    except Exception as e:
        prediction = str(e)

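    # Log the query, retrieved context, and response; the scheduler lock prevents a commit from running while the file is being written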
    with scheduler.lock:
        with log_file.open("a") as f:
            f.write(json.dumps(
                {
                    'user_input': user_input,
                    'retrieved_context': context_for_query,
                    'model_response': prediction
                }
            ))
            f.write("\n")

    return prediction

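# Gradio UI: a multi-line textbox for the user query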
textbox = gr.Textbox(placeholder="Enter your query here", lines=6)

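# Wire the predict function to the UI, with example questions users can click to try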
demo = gr.Interface(
    inputs=textbox, fn=predict, outputs="text",
    title="AMA on Tesla 10-K statements",
    description="This web app lets you ask questions about the contents of Tesla's 10-K reports for the period 2019 - 2023.",
    article="Note that questions that are not relevant to the Tesla 10-K reports will not be answered.",
    examples=[["What was the total revenue of the company in 2022?"],  # total revenue in 2022: ~$81.46 billion
              ["Summarize the Management Discussion and Analysis section of the 2021 report in 50 words."],
              ["What was the company's debt level in 2020?"],
              ["Identify 5 key risks identified in the 2019 10-K report. Respond with bullet point summaries."],
              ["What is the view of the management on the future of electric vehicle batteries?"]
             ],
    cache_examples=False,
    theme=gr.themes.Base(),
    concurrency_limit=16
)

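# Queue requests and launch with basic auth; the password is read from the PASSWD environment variable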
demo.queue()
demo.launch(auth=("demouser", os.getenv('PASSWD')))