import langchain
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import UnstructuredPDFLoader, UnstructuredWordDocumentLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import FAISS
from langchain import HuggingFaceHub
from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from zipfile import ZipFile
import gradio as gr
import openpyxl
import os
import shutil
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
import tiktoken
import secrets
import openai
import time
from duckduckgo_search import DDGS
import requests
import tempfile
import pandas as pd
import numpy as np
from openai import OpenAI
from mistralai.client import MistralClient
from mistralai.models.chat_completion import ChatMessage

MODEL_LIST = [
    "mistral-tiny",
    "mistral-small",
    "mistral-medium",
]
DEFAULT_MODEL = "mistral-small"
DEFAULT_TEMPERATURE = 0.7

tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")


# create the length function
def tiktoken_len(text):
    tokens = tokenizer.encode(
        text,
        disallowed_special=()
    )
    return len(tokens)


# token-based splitter: 512-token chunks with 200-token overlap
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=512,
    chunk_overlap=200,
    length_function=tiktoken_len,
    separators=["\n\n", "\n", " ", ""]
)

embeddings = SentenceTransformerEmbeddings(model_name="thenlper/gte-base")

# placeholder document
foo = Document(page_content='foo is fou!', metadata={"source": 'foo source'})


def reset_database(ui_session_id):
    session_id = f"PDFAISS-{ui_session_id}"
    if 'drive' in session_id:
        print("RESET DATABASE: session_id contains 'drive' !!")
        return None
    try:
        shutil.rmtree(session_id)
    except:
        print(f'no {session_id} directory present')
    try:
        os.remove(f"{session_id}.zip")
    except:
        print(f"no {session_id}.zip present")
    return None


def is_duplicate(split_docs, db):
    epsilon = 0.0
    print(f"DUPLICATE: Treating: {split_docs[0].metadata['source'].split('/')[-1]}")
    for i in range(min(3, len(split_docs))):
        query = split_docs[i].page_content
        docs = db.similarity_search_with_score(query, k=1)
        _, score = docs[0]
        epsilon += score
    print(f"DUPLICATE: epsilon: {epsilon}")
    return epsilon < 0.1


def merge_split_docs_to_db(split_docs, db, progress, progress_step=0.1):
    progress(progress_step, desc="merging docs")
    if len(split_docs) == 0:
        print("MERGE to db: NO docs!!")
        return
    filename = split_docs[0].metadata['source']

    # if is_duplicate(split_docs,db): #todo handle duplicate management
    #     print(f"MERGE: Document is duplicated: {filename}")
    #     return
    # print(f"MERGE: number of split docs: {len(split_docs)}")
    batch = 10
    db1 = None
    for i in range(0, len(split_docs), batch):
        progress(i / len(split_docs), desc=f"added {i} chunks of {len(split_docs)} chunks")
        if db1:
            db1.add_documents(split_docs[i:i + batch])
        else:
            db1 = FAISS.from_documents(split_docs[i:i + batch], embeddings)
    # create an index with the same name as the file; `db` holds the session folder path
    # when called from the merge_*_to_db helpers below
    if isinstance(db, str):
        db1.save_local("/".join([db, filename]))
    else:
        db1.save_local(filename)
    # db.merge_from(db1)  # we do not merge anymore, instead, we create a new index for each file
    return db1


def merge_pdf_to_db(filename, session_folder, progress, progress_step=0.1):
    progress_step += 0.05
    progress(progress_step, 'unpacking pdf')
    doc = UnstructuredPDFLoader(filename).load()
    doc[0].metadata['source'] = filename.split('/')[-1]
    split_docs = text_splitter.split_documents(doc)
    progress_step += 0.3
    progress(progress_step, 'pdf unpacked')
    return merge_split_docs_to_db(split_docs, session_folder, progress, progress_step)

def merge_docx_to_db(filename, session_folder, progress, progress_step=0.1):
    progress_step += 0.05
    progress(progress_step, 'unpacking docx')
    doc = UnstructuredWordDocumentLoader(filename).load()
    doc[0].metadata['source'] = filename.split('/')[-1]
    split_docs = text_splitter.split_documents(doc)
    progress_step += 0.3
    progress(progress_step, 'docx unpacked')
    return merge_split_docs_to_db(split_docs, session_folder, progress, progress_step)


def merge_txt_to_db(filename, session_folder, progress, progress_step=0.1):
    progress_step += 0.05
    progress(progress_step, 'unpacking txt')
    with open(filename) as f:
        docs = text_splitter.split_text(f.read())
    split_docs = [Document(page_content=doc, metadata={'source': filename.split('/')[-1]}) for doc in docs]
    progress_step += 0.3
    progress(progress_step, 'txt unpacked')
    return merge_split_docs_to_db(split_docs, session_folder, progress, progress_step)


def unpack_zip_file(filename, db, progress):
    with ZipFile(filename, 'r') as zipObj:
        contents = zipObj.namelist()
    print(f"unpack zip: contents: {contents}")
    tmp_directory = filename.split('/')[-1].split('.')[-2]
    shutil.unpack_archive(filename, tmp_directory)
    if 'index.faiss' in [item.lower() for item in contents]:
        db2 = FAISS.load_local(tmp_directory, embeddings)
        db.merge_from(db2)
        return db
    for file in contents:
        if file.lower().endswith('.docx'):
            db = merge_docx_to_db(f"{tmp_directory}/{file}", db, progress)
        if file.lower().endswith('.pdf'):
            db = merge_pdf_to_db(f"{tmp_directory}/{file}", db, progress)
        if file.lower().endswith('.txt'):
            db = merge_txt_to_db(f"{tmp_directory}/{file}", db, progress)
    return db


def unzip_db(filename, ui_session_id):
    with ZipFile(filename, 'r') as zipObj:
        contents = zipObj.namelist()
    print(f"unzip: contents: {contents}")
    tmp_directory = f"PDFAISS-{ui_session_id}"
    shutil.unpack_archive(filename, tmp_directory)


def add_files_to_zip(session_id):
    zip_file_name = f"{session_id}.zip"
    with ZipFile(zip_file_name, "w") as zipObj:
        for root, dirs, files in os.walk(session_id):
            for file_name in files:
                file_path = os.path.join(root, file_name)
                arcname = os.path.relpath(file_path, session_id)
                zipObj.write(file_path, arcname)


## Search files functions ##

def search_docs(topic, max_references):
    print(f"SEARCH PDF : {topic}")
    doc_list = []
    with DDGS() as ddgs:
        i = 0
        for r in ddgs.text('{} filetype:pdf'.format(topic), region='wt-wt', safesearch='On', timelimit='n'):
            # doc_list.append(str(r))
            if i >= max_references:
                break
            doc_list.append("TITLE : " + r['title'] + " -- BODY : " + r['body'] + " -- URL : " + r['href'])
            i += 1
    return gr.update(choices=doc_list)


def store_files(references, ret_names=False):
    url_list = []
    temp_files = []
    for ref in references:
        url_list.append(ref.split(" ")[-1])
    for url in url_list:
        response = requests.get(url)
        if response.status_code == 200:
            filename = url.split('/')[-1]
            if filename.split('.')[-1] == 'pdf':
                filename = filename[:-4]
                print('File name.pdf :', filename)
                temp_file = tempfile.NamedTemporaryFile(delete=False, prefix=filename, suffix='.pdf')
            else:
                print('File name :', filename)
                temp_file = tempfile.NamedTemporaryFile(delete=False, prefix=filename, suffix='.pdf')
            temp_file.write(response.content)
            temp_file.close()
            if ret_names:
                temp_files.append(temp_file.name)
            else:
                temp_files.append(temp_file)
    return temp_files
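
## Helpers whose original definitions are missing from this copy of the file ##
# The functions below are assumed reconstructions, not the original code: export_files'
# signature comes from the Gradio wiring at the bottom of the script, and
# upload_text_file / add_line_breaks / gpt_answer are inferred from how ask_df calls
# them. Treat them as sketches.

def export_files(references):
    # Assumed: download the selected search results and return the local paths so the
    # file widget can pick them up (store_files already knows how to fetch them).
    return store_files(references, ret_names=True)


def upload_text_file(text):
    # Assumed: the original presumably uploaded the retrieved references somewhere and
    # returned a URL for the "[Sources](...)" link; this sketch simply writes a local
    # text file and returns its path instead.
    fname = f"sources-{secrets.token_urlsafe(8)}.txt"
    with open(fname, "w", encoding="utf-8") as f:
        f.write(text)
    return fname


def add_line_breaks(text, max_length=100):
    # Assumed: wrap long lines so the markdown answers stay readable in the dataframe.
    lines = []
    for line in text.split("\n"):
        while len(line) > max_length:
            cut = line.rfind(" ", 0, max_length)
            cut = cut if cut > 0 else max_length
            lines.append(line[:cut])
            line = line[cut:].lstrip()
        lines.append(line)
    return "\n".join(lines)


def gpt_answer(api_key, query, model=DEFAULT_MODEL):
    # Assumed: route the prompt to Mistral or OpenAI depending on the selected model,
    # using the clients imported at the top of the file.
    if model in MODEL_LIST:
        client = MistralClient(api_key=api_key)
        response = client.chat(
            model=model,
            messages=[ChatMessage(role="user", content=query)],
            temperature=DEFAULT_TEMPERATURE,
        )
    else:
        client = OpenAI(api_key=api_key)
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": query}],
            temperature=DEFAULT_TEMPERATURE,
        )
    return response.choices[0].message.content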

## Summary functions ##

## Load each doc from the vector store
def load_docs(ui_session_id):
    session_id_global_db = f"PDFAISS-{ui_session_id}"
    try:
        db = FAISS.load_local(session_id_global_db, embeddings)
        print("load_docs after loading global db:", session_id_global_db, len(db.index_to_docstore_id))
    except:
        return f"SESSION: {session_id_global_db} database does not exist", "", ""
    docs = []
    for i in range(1, len(db.index_to_docstore_id)):
        docs.append(db.docstore.search(db.index_to_docstore_id[i]))
    return docs


# summarize with gpt 3.5 turbo
def summarize_gpt(doc, system='provide a summary of the following document: ', first_tokens=600):
    doc = doc.replace('\n\n\n', '').replace('---', '').replace('...', '').replace('___', '')
    encoded = tokenizer.encode(doc)
    print("\n TOKENIZED : ", encoded)
    decoded = tokenizer.decode(encoded[:min(first_tokens, len(encoded))])
    print("\n DOC SHORTEN", min(first_tokens, len(encoded)), " : ", decoded)
    # openai>=1.0 style call (the legacy openai.ChatCompletion API was removed)
    completion = openai.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": decoded}
        ]
    )
    return completion.choices[0].message.content


def summarize_docs_generator(apikey_input, session_id):
    openai.api_key = apikey_input
    docs = load_docs(session_id)
    print("################# DOCS LOADED ##################", "docs type : ", type(docs[0]))
    try:
        fail = docs[0].page_content
    except:
        return docs[0]
    source = ""
    summaries = ""
    i = 0
    # The original body of this loop is missing from this copy of the file; the loop
    # below is an assumed reconstruction that summarizes the first indexed chunk of
    # each distinct source file and yields the accumulated text as it goes.
    while i < len(docs):
        doc = docs[i]
        if doc.metadata["source"] != source:
            source = doc.metadata["source"]
            summaries += f"# {source}\n\n"
            summaries += summarize_gpt(doc.page_content) + "\n\n"
            yield summaries
        i += 1


## Q&A functions ##

def ask_df(df, api_key, model, ui_session_id):
    # The original signature and setup lines of this function are missing from this
    # copy of the file; they are an assumed reconstruction based on the Gradio wiring
    # below (df_qna, apikey_input, dd_model, tb_session_id) and on how the loop uses them.
    answers = []
    session_folder = f"PDFAISS-{ui_session_id}"
    question_column = df.columns[-1]  # -> last column label, last question
    if len(df) == 0 or len(str(df.at[0, question_column])) < 2:  # no question entered yet
        return df
    for index, row in df.iterrows():
        question = row.iloc[-1]
        print(f"Question: {question}")
        if len(question) < 2:
            question = df.at[0, question_column].split("\n---\n")[0]
        db_folder = "/".join([session_folder, row["File name"]])
        db = FAISS.load_local(db_folder, embeddings)
        print(f"\n\nQUESTION:\n{question}\n\n")
        docs = db.similarity_search(question)
        references = '\n******************************\n'.join([d.page_content for d in docs])
        print(f"REFERENCES: {references}")
        try:
            source = upload_text_file(references)
        except:
            source = "ERROR WHILE GETTING THE SOURCES FILE"
        query = f"## USER QUESTION:\n{question}\n\n## REFERENCES:\n{references}\n\nANSWER:\n\n"
        try:
            answer = gpt_answer(api_key, query, model)
        except Exception as e:
            answer = "ERROR WHILE ANSWERING THE QUESTION"
            print("ERROR: ", e)
        complete_answer = add_line_breaks("\n---\n".join(["## " + question, answer, "[Sources](" + source + ")"]))
        answers.append(complete_answer)
        print(complete_answer)
    df[question_column] = answers
    return df


def export_df(df, ftype):
    fname = secrets.token_urlsafe(16)
    if ftype == "xlsx":
        df.to_excel(f"{fname}.xlsx", index=False)
        return f"{fname}.xlsx"
    if ftype == "pkl":
        df.to_pickle(f"{fname}.pkl")  # DataFrame.to_pickle() takes no index argument
        return f"{fname}.pkl"
    if ftype == "csv":
        df.to_csv(f"{fname}.csv", index=False)
        return f"{fname}.csv"
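
## UI callbacks whose original definitions are also missing from this copy ##
# These are assumed reconstructions: the signatures come from the .click()/.submit()
# wiring below, and the behaviour is inferred from the helpers above (one FAISS index
# per file stored under PDFAISS-<session id>, zipped for download). They are sketches,
# not the original implementations.

def update_df(ui_session_id):
    # Assumed: one row per indexed file plus an empty last column that holds the questions.
    session_folder = f"PDFAISS-{ui_session_id}"
    try:
        file_names = sorted(os.listdir(session_folder))
    except FileNotFoundError:
        file_names = []
    return pd.DataFrame({"File name": file_names, "Question": ["" for _ in file_names]})


def embed_files(docs_input, ui_session_id, progress=gr.Progress()):
    # Assumed: index every uploaded file into its own FAISS store inside the session
    # folder, then zip the folder so it can be downloaded and re-imported later.
    if not ui_session_id:
        ui_session_id = secrets.token_urlsafe(8)
    session_folder = f"PDFAISS-{ui_session_id}"
    os.makedirs(session_folder, exist_ok=True)
    for file in docs_input:
        filename = file.name if hasattr(file, "name") else file
        lower = filename.lower()
        if lower.endswith(".pdf"):
            merge_pdf_to_db(filename, session_folder, progress)
        elif lower.endswith(".docx"):
            merge_docx_to_db(filename, session_folder, progress)
        elif lower.endswith(".txt"):
            merge_txt_to_db(filename, session_folder, progress)
        elif lower.endswith(".zip"):
            unzip_db(filename, ui_session_id)
    add_files_to_zip(session_folder)
    return f"{session_folder}.zip", ui_session_id, update_df(ui_session_id)


def add_to_db(references, ui_session_id, progress=gr.Progress()):
    # Assumed: download the selected search results, then index them like embed_files does.
    files = store_files(references, ret_names=True)
    zip_file, session_id, _ = embed_files(files, ui_session_id, progress)
    return zip_file, session_id


def summarize_docs(apikey_input, session_id):
    # Assumed: drain the summary generator and return the final accumulated text.
    summaries = ""
    for summaries in summarize_docs_generator(apikey_input, session_id):
        pass
    return summaries


def ask_gpt(query, apikey, history, ui_session_id):
    # Assumed: answer a one-off question against the session-wide index and append the
    # exchange to the history string.
    session_id_global_db = f"PDFAISS-{ui_session_id}"
    try:
        db = FAISS.load_local(session_id_global_db, embeddings)
    except:
        return f"SESSION: {session_id_global_db} database does not exist", "", history
    docs = db.similarity_search(query)
    references = '\n******************************\n'.join([d.page_content for d in docs])
    prompt = f"## USER QUESTION:\n{query}\n\n## REFERENCES:\n{references}\n\nANSWER:\n\n"
    answer = gpt_answer(apikey, prompt, model="gpt-3.5-turbo")
    history = f"{history}\nQ: {query}\nA: {answer}\n"
    return answer, references, history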

with gr.Blocks() as demo:
    gr.Markdown("Upload your documents and question them.")
    with gr.Accordion("Open to enter your API key", open=False):
        apikey_input = gr.Textbox(placeholder="Type here your OpenAI API key to use Summarization and Q&A", label="OpenAI API Key", type='password')
        dd_model = gr.Dropdown(["mistral-tiny", "mistral-small", "mistral-medium", "gpt-3.5-turbo-1106", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4-1106-preview", "gpt-4", "gpt-4-32k"], value="gpt-3.5-turbo-1106", label='List of models', allow_custom_value=True, scale=1)
    with gr.Tab("Upload PDF & TXT"):
        with gr.Accordion("Get files from the web", open=False):
            with gr.Column():
                topic_input = gr.Textbox(placeholder="Type your research", label="Research")
                with gr.Row():
                    max_files = gr.Slider(1, 30, step=1, value=10, label="Maximum number of files")
                    btn_search = gr.Button("Search")
                dd_documents = gr.Dropdown(label='List of documents', info='Click to remove from selection', multiselect=True)
                with gr.Row():
                    btn_dl = gr.Button("Add these files to the Database")
                    btn_export = gr.Button("⬇ Export selected files ⬇")
        tb_session_id = gr.Textbox(label='session id')
        docs_input = gr.File(file_count="multiple", file_types=[".txt", ".pdf", ".zip", ".docx"])
        db_output = gr.File(label="Download zipped database")
        btn_generate_db = gr.Button("Generate database")
        btn_reset_db = gr.Button("Reset database")
        df_qna = gr.Dataframe(interactive=True, datatype="markdown")
        with gr.Row():
            btn_clear_df = gr.Button("Clear df")
            btn_fill_answers = gr.Button("Fill table with generated answers")
        with gr.Accordion("Export dataframe", open=False):
            with gr.Row():
                btn_export_df = gr.Button("Export df as", scale=1)
                r_format = gr.Radio(["xlsx", "pkl", "csv"], label="File type", value="xlsx", scale=2)
                file_df = gr.File(scale=1)
        btn_clear_df.click(update_df, inputs=[tb_session_id], outputs=df_qna)
        btn_fill_answers.click(ask_df, inputs=[df_qna, apikey_input, dd_model, tb_session_id], outputs=df_qna)
        btn_export_df.click(export_df, inputs=[df_qna, r_format], outputs=[file_df])
    with gr.Tab("Summarize PDF"):
        with gr.Column():
            summary_output = gr.Textbox(label='Summarized files')
            btn_summary = gr.Button("Summarize")
    with gr.Tab("Ask PDF"):
        with gr.Column():
            query_input = gr.Textbox(placeholder="Type your question", label="Question")
            btn_askGPT = gr.Button("Answer")
            answer_output = gr.Textbox(label='GPT 3.5 answer')
            sources = gr.Textbox(label='Sources')
            history = gr.Textbox(label='History')

    topic_input.submit(search_docs, inputs=[topic_input, max_files], outputs=dd_documents)
    btn_search.click(search_docs, inputs=[topic_input, max_files], outputs=dd_documents)
    btn_dl.click(add_to_db, inputs=[dd_documents, tb_session_id], outputs=[db_output, tb_session_id])
    btn_export.click(export_files, inputs=dd_documents, outputs=docs_input)
    btn_generate_db.click(embed_files, inputs=[docs_input, tb_session_id], outputs=[db_output, tb_session_id, df_qna])
    btn_reset_db.click(reset_database, inputs=[tb_session_id], outputs=[db_output])
    btn_summary.click(summarize_docs, inputs=[apikey_input, tb_session_id], outputs=summary_output)
    btn_askGPT.click(ask_gpt, inputs=[query_input, apikey_input, history, tb_session_id], outputs=[answer_output, sources, history])

# demo.queue(concurrency_count=10)
demo.launch(debug=False, share=False)