Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -231,6 +231,14 @@ def display_docs(docs):
|
|
231 |
return output_str
|
232 |
|
233 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
234 |
def ask_llm(system, user_input):
|
235 |
messages = [
|
236 |
{
|
@@ -289,9 +297,9 @@ def ask_gpt(query, ui_session_id, history):
|
|
289 |
print("ASKGPT after loading",session_id,len(db.index_to_docstore_id))
|
290 |
except:
|
291 |
print(f"SESSION: {session_id} database does not exist")
|
292 |
-
return f"SESSION: {session_id} database does not exist","",""
|
293 |
|
294 |
-
docs = db.similarity_search(query)
|
295 |
|
296 |
documents = "\n\n*-*-*-*-*-*\n\n".join(f"Content: {doc.page_content}\n" for doc in docs)
|
297 |
system = f"# Instructions\nTake a deep breath and resonate step by step.\nYou are a helpful standard assistant. Your have only one mission and that consists in answering to the user input based on the **provided documents**. If the answer to the question that is asked by the user isn't contained in the **provided documents**, say so but **don't make up an answer**. I chose you because you can say 'I don't know' so please don't do like the other LLMs and don't define acronyms that aren\'t present in the following **PROVIDED DOCUMENTS** double check if it is present before answering. If some of the information can be useful for the user you can tell him.\nFinish your response by **ONE** follow up question that the provided documents could answer.\n\nThe documents are separated by the string \'*-*-*-*-*-*\'. Do not provide any explanations or details.\n\n# **Provided documents**: {documents}."
|
@@ -314,6 +322,36 @@ def auth_user(ui_session_id):
|
|
314 |
else:
|
315 |
return gr.Textbox(label='Username', visible=True), gr.File(file_count="multiple", file_types=[".txt", ".pdf",".zip",".docx"], visible=False), gr.Button("Reset AI Knowledge", visible=False), gr.Markdown(label='AI Answer', visible=False), gr.Textbox(placeholder="Type your question", label="Question ❔", scale=9, visible=False), gr.Button("▶", scale=1, visible=False), gr.Textbox(label='Sources', show_copy_button=True, visible=False), gr.File(label="Zipped database", visible=False), gr.Textbox(label='History', show_copy_button=True, visible=False)
|
316 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
317 |
with gr.Blocks() as demo:
|
318 |
gr.Markdown("# Enrich an LLM knowledge with your own documents 🧠🤖")
|
319 |
|
@@ -328,6 +366,13 @@ with gr.Blocks() as demo:
|
|
328 |
with gr.Row():
|
329 |
query_input = gr.Textbox(placeholder="Type your question", label="Question ❔", scale=9, visible=False)
|
330 |
btn_askGPT = gr.Button("▶", scale=1, visible=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
331 |
|
332 |
tb_sources = gr.Textbox(label='Sources', show_copy_button=True, visible=False)
|
333 |
|
@@ -344,5 +389,12 @@ with gr.Blocks() as demo:
|
|
344 |
btn_askGPT.click(ask_gpt, inputs=[query_input, tb_session_id, tb_history], outputs=[answer_output, tb_sources, tb_history])
|
345 |
query_input.submit(ask_gpt, inputs=[query_input, tb_session_id, tb_history], outputs=[answer_output, tb_sources, tb_history])
|
346 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
347 |
|
348 |
demo.launch(debug=False,share=False)
|
|
|
231 |
return output_str
|
232 |
|
233 |
|
234 |
+
def display_docs_modal(docs):
    """Format retrieved documents into a list of reference strings.

    Args:
        docs: iterable of document objects exposing ``page_content`` and
            ``metadata['source']`` (LangChain-style Documents — assumed from
            usage; TODO confirm against the retriever's return type).

    Returns:
        list[str]: one entry per document, each tagged ``Ref: <i>`` and
        terminated by the ``*§*§*`` marker that downstream code uses as a
        separator when splitting the sources text.
    """
    output_list = []
    for i, doc in enumerate(docs):
        # Keep only the file name, dropping any leading path components.
        source = doc.metadata['source'].split('/')[-1]
        # BUG FIX: the original appended to the undefined name `output_str`,
        # which would raise NameError on the first document; the initialized
        # and returned accumulator is `output_list`.
        output_list.append(f"Ref: {i+1}\n{repr(doc.page_content)}\nSource: {source}\n*§*§*\n")
    return output_list
|
240 |
+
|
241 |
+
|
242 |
def ask_llm(system, user_input):
|
243 |
messages = [
|
244 |
{
|
|
|
297 |
print("ASKGPT after loading",session_id,len(db.index_to_docstore_id))
|
298 |
except:
|
299 |
print(f"SESSION: {session_id} database does not exist")
|
300 |
+
return f"SESSION: {session_id} database does not exist","",""
|
301 |
|
302 |
+
docs = db.similarity_search(query, k=5)
|
303 |
|
304 |
documents = "\n\n*-*-*-*-*-*\n\n".join(f"Content: {doc.page_content}\n" for doc in docs)
|
305 |
system = f"# Instructions\nTake a deep breath and resonate step by step.\nYou are a helpful standard assistant. Your have only one mission and that consists in answering to the user input based on the **provided documents**. If the answer to the question that is asked by the user isn't contained in the **provided documents**, say so but **don't make up an answer**. I chose you because you can say 'I don't know' so please don't do like the other LLMs and don't define acronyms that aren\'t present in the following **PROVIDED DOCUMENTS** double check if it is present before answering. If some of the information can be useful for the user you can tell him.\nFinish your response by **ONE** follow up question that the provided documents could answer.\n\nThe documents are separated by the string \'*-*-*-*-*-*\'. Do not provide any explanations or details.\n\n# **Provided documents**: {documents}."
|
|
|
322 |
else:
|
323 |
return gr.Textbox(label='Username', visible=True), gr.File(file_count="multiple", file_types=[".txt", ".pdf",".zip",".docx"], visible=False), gr.Button("Reset AI Knowledge", visible=False), gr.Markdown(label='AI Answer', visible=False), gr.Textbox(placeholder="Type your question", label="Question ❔", scale=9, visible=False), gr.Button("▶", scale=1, visible=False), gr.Textbox(label='Sources', show_copy_button=True, visible=False), gr.File(label="Zipped database", visible=False), gr.Textbox(label='History', show_copy_button=True, visible=False)
|
324 |
|
325 |
+
def display_info0(documents):
    """Show reference #1 from the sources text in a Gradio Info toast.

    ``documents`` is the raw content of the Sources textbox, whose entries
    are separated by the '\\n*§*§*\\n' marker; any failure (missing entry,
    non-string input) falls back to a "No Document" toast.
    """
    separator = "\n*§*§*\n"
    try:
        gr.Info(documents.split(separator)[0])
    except Exception:
        gr.Info("No Document")
|
330 |
+
|
331 |
+
def display_info1(documents):
    """Show reference #2 from the sources text in a Gradio Info toast.

    ``documents`` is the raw content of the Sources textbox, whose entries
    are separated by the '\\n*§*§*\\n' marker; any failure (missing entry,
    non-string input) falls back to a "No Document" toast.
    """
    separator = "\n*§*§*\n"
    try:
        gr.Info(documents.split(separator)[1])
    except Exception:
        gr.Info("No Document")
|
336 |
+
|
337 |
+
def display_info2(documents):
    """Show reference #3 from the sources text in a Gradio Info toast.

    ``documents`` is the raw content of the Sources textbox, whose entries
    are separated by the '\\n*§*§*\\n' marker; any failure (missing entry,
    non-string input) falls back to a "No Document" toast.
    """
    separator = "\n*§*§*\n"
    try:
        gr.Info(documents.split(separator)[2])
    except Exception:
        gr.Info("No Document")
|
342 |
+
|
343 |
+
def display_info3(documents):
    """Show reference #4 from the sources text in a Gradio Info toast.

    ``documents`` is the raw content of the Sources textbox, whose entries
    are separated by the '\\n*§*§*\\n' marker; any failure (missing entry,
    non-string input) falls back to a "No Document" toast.
    """
    separator = "\n*§*§*\n"
    try:
        gr.Info(documents.split(separator)[3])
    except Exception:
        gr.Info("No Document")
|
348 |
+
|
349 |
+
def display_info4(documents):
    """Show reference #5 from the sources text in a Gradio Info toast.

    ``documents`` is the raw content of the Sources textbox, whose entries
    are separated by the '\\n*§*§*\\n' marker; any failure (missing entry,
    non-string input) falls back to a "No Document" toast.
    """
    separator = "\n*§*§*\n"
    try:
        gr.Info(documents.split(separator)[4])
    except Exception:
        gr.Info("No Document")
|
354 |
+
|
355 |
with gr.Blocks() as demo:
|
356 |
gr.Markdown("# Enrich an LLM knowledge with your own documents 🧠🤖")
|
357 |
|
|
|
366 |
with gr.Row():
|
367 |
query_input = gr.Textbox(placeholder="Type your question", label="Question ❔", scale=9, visible=False)
|
368 |
btn_askGPT = gr.Button("▶", scale=1, visible=False)
|
369 |
+
with gr.Row():
|
370 |
+
btn1 = gr.Button("Ref 1")
|
371 |
+
btn2 = gr.Button("Ref 2")
|
372 |
+
btn3 = gr.Button("Ref 3")
|
373 |
+
btn4 = gr.Button("Ref 4")
|
374 |
+
btn5 = gr.Button("Ref 5")
|
375 |
+
|
376 |
|
377 |
tb_sources = gr.Textbox(label='Sources', show_copy_button=True, visible=False)
|
378 |
|
|
|
389 |
btn_askGPT.click(ask_gpt, inputs=[query_input, tb_session_id, tb_history], outputs=[answer_output, tb_sources, tb_history])
|
390 |
query_input.submit(ask_gpt, inputs=[query_input, tb_session_id, tb_history], outputs=[answer_output, tb_sources, tb_history])
|
391 |
|
392 |
+
btn1.click(display_info0, inputs=tb_sources, outputs=None)
|
393 |
+
btn2.click(display_info1, inputs=tb_sources, outputs=None)
|
394 |
+
btn3.click(display_info2, inputs=tb_sources, outputs=None)
|
395 |
+
btn4.click(display_info3, inputs=tb_sources, outputs=None)
|
396 |
+
btn5.click(display_info4, inputs=tb_sources, outputs=None)
|
397 |
+
|
398 |
+
|
399 |
|
400 |
demo.launch(debug=False,share=False)
|