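"""Streamlit Q&A app for council tax questions.

Loads a persisted LlamaIndex vector index, answers questions with a
local llama3 model served by Ollama, and logs every question/answer
pair (together with the app version) to a SQLite database.
"""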
import streamlit as st
import pandas as pd
import sqlite3
from llama_index.core import StorageContext, load_index_from_storage
from llama_index.llms.ollama import Ollama
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import PromptTemplate
import os


version = 2.3
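# `version` is stored alongside each Q&A row (see save_to_db) so saved
# answers can be traced back to the app revision that produced them.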

# Initialize the SQLite3 database
conn = sqlite3.connect('qa.db')
c = conn.cursor()
# Update the table creation to include the version column
c.execute('CREATE TABLE IF NOT EXISTS qa (question TEXT, answer TEXT, version REAL)')
conn.commit()
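# Note: Streamlit re-runs this whole script on every interaction, so the
# connection above is re-opened per run and used from that run's thread.
# A long-lived shared connection would typically need
# check_same_thread=False and caching via st.cache_resource.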

# Read a text file into a string (used for the model description and the prompt template)
def read_description_from_file(file_path):
    with open(file_path, 'r') as file:
        return file.read()

# Define the folder containing the saved index
INDEX_OUTPUT_PATH = "./output_index"

# Ensure the saved index directory exists before trying to load it
if not os.path.exists(INDEX_OUTPUT_PATH):
    raise FileNotFoundError(f"Index directory {INDEX_OUTPUT_PATH} does not exist")

# Set up the LLM and embedding model
llm = Ollama(model="llama3", request_timeout=120.0)
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-large-en-v1.5", trust_remote_code=True)
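# The embedding model must match the one used when the index was built;
# otherwise query embeddings won't align with the stored vectors.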

# Rebuild the storage context and load the persisted index from disk
storage_context = StorageContext.from_defaults(persist_dir=INDEX_OUTPUT_PATH)
loaded_index = load_index_from_storage(embed_model=embed_model, storage_context=storage_context)

# Build a query engine over the loaded index, wiring in the LLM and embedding model
query_engine = loaded_index.as_query_engine(llm=llm, embed_model=embed_model)

# Customise the prompt template: read it from a file and swap it into
# the query engine's response synthesizer
qa_prompt_tmpl_str = read_description_from_file("tab2_pe.txt")
qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)

query_engine.update_prompts(
    {"response_synthesizer:text_qa_template": qa_prompt_tmpl}
)
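# "response_synthesizer:text_qa_template" targets the prompt the engine
# uses to answer from retrieved context; the template file is expected
# to contain the {context_str} and {query_str} placeholders that
# LlamaIndex fills in at query time.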

# Save the question and answer to the SQLite3 database
def save_to_db(question, answer, version):
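    # The "?" placeholders are SQLite parameter bindings; they keep the
    # user-supplied question text from being interpreted as SQL.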
    c.execute('INSERT INTO qa (question, answer, version) VALUES (?, ?, ?)', (question, answer, version))
    conn.commit()

# Fetch all data from the SQLite3 database
def fetch_from_db():
    c.execute('SELECT * FROM qa')
    return c.fetchall()

def main():
    st.title("How Much Can AI Assist Our Council's Email Replying System?")

    tab1, tab2, tab3 = st.tabs(["LLM Model Description", "Ask a Question", "View Q&A History"])

    with tab1:
        st.subheader("LLM Model Description")
        description = read_description_from_file("tab1_intro.txt")
        st.write(description)

    with tab2:
        st.subheader("Ask a Question (Please try to focus on council tax)")
        question = st.text_input("Enter your question:")
        if st.button("Get Answer"):
            if question:
                try:
                    response = query_engine.query(question)

                    # LlamaIndex Response objects expose the generated
                    # text via their `response` attribute; fall back to
                    # str() for any other response type.
                    if getattr(response, 'response', None) is not None:
                        answer = response.response
                    else:
                        answer = str(response)

                    st.write(f"**Answer:** {answer}")

                    # Save question and answer to database
                    save_to_db(question, answer, version)
                except Exception as e:
                    st.error(f"An error occurred: {e}")
            else:
                st.warning("Please enter a question")

    with tab3:
        st.subheader("View Q&A History")
        qa_data = fetch_from_db()
        if qa_data:
            df = pd.DataFrame(qa_data, columns=["Question", "Answer", "Version"])
            st.dataframe(df)

            # Provide download option for Q&A history as CSV
            st.write("Due to a current bug, the app may not respond as expected. Below is an extract of the test results from previous interactions:")
            csv = df.to_csv(index=False).encode('utf-8')
            st.download_button(
                label="Download Q&A history as CSV",
                data=csv,
                file_name='qa_output.csv',
                mime='text/csv',
            )
        else:
            st.write("No data available")
            
        # Check if the existing CSV file is present in the working directory
        if os.path.exists('qa_output.csv'):
            st.write("Additionally, you can download the pre-existing CSV file with historical data:")
            with open('qa_output.csv', 'rb') as f:
                st.download_button(
                    label="Download existing Q&A history CSV",
                    data=f,
                    file_name='qa_output.csv',
                    mime='text/csv',
                )


if __name__ == "__main__":
    main()
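# Launch with, e.g.:  streamlit run app.py
# ("app.py" is a placeholder for whatever this file is saved as; the
# app is served at http://localhost:8501 by default.)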