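"""Gradio demo for retrieval-augmented chat.

The app retrieves context passages with `qd_retriever`, renders them into a
prompt via Jinja2 templates (a plain-text version for the LLM and an HTML
version for display), streams the model's answer into a chatbot, and shows
the fully rendered prompt in a separate panel.
"""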
from time import perf_counter

import gradio as gr
from jinja2 import Template

from backend.query_llm import generate
from backend.semantic_search import qd_retriever

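# Plain-text prompt sent to the LLM. Each retrieved document is expected to
# expose a `.content` attribute (see `{{ doc.content }}` below).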
template_string = """
Instructions: Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
Context:
---
{% for doc in documents %}
    {{ doc.content }}
    ---
{% endfor %}
Query: {{ query }}
"""

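# HTML version of the same prompt, displayed via gr.HTML in the "Application"
# tab. Each document is collapsed to its first 50 characters and can be
# clicked to toggle the full text (see the inline <script> below).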
md_template_string = """
<b>Instructions</b>: 
<span style="color: green;">Use the following pieces of context to answer the question at the end.<br>If you don't know the answer, just say that you don't know, <span style="color: green; font-weight: bold;">don't try to make up an answer.</span></span><br>

<b>Context</b>:
{% for doc in documents %}
    <div id="box{{ loop.index }}" style="border: 2px solid #aaa; padding: 10px; margin-top: 10px; border-radius: 5px; background-color: #1E90FF; position: relative; cursor: pointer;">
        <div style="font-size: 0.8em; position: absolute; top: 10px; left: 10px;"><b>Doc {{ loop.index }}</b></div>
        <span id="doc{{ loop.index }}-short" style="color: white; display: block; margin-top: 20px;">{{ doc.content[:50] }}...</span>
        <span id="doc{{ loop.index }}-full" style="color: white; display: none; margin-top: 20px;">{{ doc.content }}</span>
    </div>
{% endfor %}
<b>Query</b>: <span style="color: yellow;">{{ query }}</span>
<script>
document.addEventListener("DOMContentLoaded", function() {
    {% for doc in documents %}
        document.getElementById("box{{ loop.index }}").addEventListener("click", function() {
            toggleContent('doc{{ loop.index }}');
        });
    {% endfor %}
});

function toggleContent(docID) {
    var shortContent = document.getElementById(docID + '-short');
    var fullContent = document.getElementById(docID + '-full');
    if (fullContent.style.display === 'none') {
        shortContent.style.display = 'none';
        fullContent.style.display = 'block';
    } else {
        shortContent.style.display = 'block';
        fullContent.style.display = 'none';
    }
}
</script>
"""

template = Template(template_string)
md_template = Template(md_template_string)


def add_text(history, text):
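    """Append the user's message to the chat history and clear/disable the textbox while the bot replies."""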
    history = [] if history is None else history
    history = history + [(text, None)]
    return history, gr.Textbox(value="", interactive=False)


def bot(history, system_prompt=""):
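    """Retrieve context for the latest user message, build the prompt, and stream the answer.

    Note: `system_prompt` is accepted but not currently used.
    """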
    top_k = 5
    query = history[-1][0]

    # Retrieve documents relevant to query
    document_start = perf_counter()
    documents = qd_retriever.retrieve(query, top_k=top_k)
    document_time = perf_counter() - document_start

    # Create Prompt
    prompt = template.render(documents=documents, query=query)
    md_prompt = md_template.render(documents=documents, query=query)

    # # Query LLM with prompt based on relevant documents
    # llm_start = perf_counter()
    # result = generate(prompt=prompt, history='')
    # llm_time = perf_counter() - llm_start
    # times = (document_time, llm_time)

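    # Stream the answer into the last chat turn. `generate` is assumed to yield
    # the accumulated response so far, so each iteration overwrites (rather
    # than appends to) the bot message.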
    history[-1][1] = ""
    for partial_response in generate(prompt, history[:-1]):
        history[-1][1] = partial_response
        yield history, md_prompt


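# Gradio UI: a chat tab with the chatbot and a text input, plus an HTML panel
# underneath that shows the rendered retrieval prompt for inspection.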
with gr.Blocks() as demo:
    with gr.Tab("Application"):
        chatbot = gr.Chatbot(
                [],
                elem_id="chatbot",
                avatar_images=('examples/lama.jpeg', 'examples/lama2.jpeg'),
                bubble_full_width=False,
                show_copy_button=True,
                show_share_button=True,
                )

        with gr.Row():
            txt = gr.Textbox(
                    scale=3,
                    show_label=False,
                    placeholder="Enter text and press enter",
                    container=False,
                    )
            txt_btn = gr.Button(value="Submit text", scale=1)

        prompt_md = gr.HTML()
        # Turn off interactivity while generating when the submit button is clicked
        txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
                bot, chatbot, [chatbot, prompt_md])

        # Turn it back on
        txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)

        # Turn off interactivity while generating if you hit enter
        txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
                bot, chatbot, [chatbot, prompt_md])

        # Turn it back on
        txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)

    gr.Examples(['What is the largest city on earth?', 'Who has the record for the fastest mile?'], txt)

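# Enable the request queue so the generator-based `bot` handler can stream
# partial updates to the chatbot.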
demo.queue()
demo.launch(debug=True)