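"""Document-splitting utilities exposed through a Gradio front end.

The module loads PDF, DOCX and DOC files (optionally packaged in .zip archives),
splits them into chunks using one of three strategies (section-based
"intelligent" split, fixed-size "non intelligent" split, or keyword-based
extraction), can maintain a FAISS index of the resulting chunks, and exports
the chunks to an Excel spreadsheet.
"""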
import numpy as np
import io
import os
import zipfile
import logging
import collections
import tempfile

from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
import gradio as gr

from langchain.document_loaders import PDFMinerPDFasHTMLLoader
from bs4 import BeautifulSoup
import re
from langchain.docstore.document import Document

import unstructured
from unstructured.partition.docx import partition_docx
from unstructured.partition.auto import partition

from transformers import AutoTokenizer

from pypdf import PdfReader

import pandas as pd

import requests
import json

# Embedding model and default chunking parameters.
MODEL = "thenlper/gte-base"
CHUNK_SIZE = 1500
CHUNK_OVERLAP = 400

embeddings = HuggingFaceEmbeddings(
    model_name=MODEL,
    cache_folder=os.getenv("SENTENCE_TRANSFORMERS_HOME"),
)

# Tokenizer used to measure and re-split chunks by token length.
model_id = "mistralai/Mistral-7B-Instruct-v0.1"
access_token = os.getenv("HUGGINGFACE_SPLITFILES_API_KEY")

tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    padding_side="left",
    token=access_token,
)

text_splitter = CharacterTextSplitter(
    separator="\n",
    chunk_size=CHUNK_SIZE,
    chunk_overlap=CHUNK_OVERLAP,
    length_function=len,
)

def function_split_call(fi_input, dropdown, choice, chunk_size):
    # Dispatch to the splitting strategy selected in the UI.
    if choice == "Intelligent split":
        return split_in_df(fi_input)
    elif choice == "Non intelligent split":
        return non_intelligent_split(fi_input, chunk_size)
    else:
        return split_by_keywords(fi_input, dropdown)

def change_textbox(dropdown, radio):
    # Show or hide the keyword dropdown and chunk-size field depending on the selected mode.
    if len(dropdown) == 0:
        dropdown = ["introduction", "objective", "summary", "conclusion"]
    if radio == "Intelligent split by keywords":
        return gr.Dropdown(dropdown, multiselect=True, visible=True, allow_custom_value=True), gr.Number(visible=False)
    elif radio == "Non intelligent split":
        return gr.Dropdown(dropdown, visible=False), gr.Number(label="Chunk size", value=1000, interactive=True, visible=True)
    else:
        return gr.Dropdown(dropdown, visible=False), gr.Number(visible=False)

def group_text_by_font_size(content):
    # Walk the <div>/<span> elements of PDFMiner's HTML output and accumulate
    # consecutive spans of the same font size into snippets of the form
    # (text, max_font_size, page_number).
    cur_fs = []
    cur_text = ''
    cur_page = -1
    cur_c = content[0]
    multi_fs = False
    snippets = []
    for c in content:
        # Anchors of the form <a name="N"> mark page numbers.
        if c.find('a') is not None and c.find('a').get('name'):
            cur_page = int(c.find('a').get('name'))
        sp_list = c.find_all('span')
        if not sp_list:
            continue
        for sp in sp_list:
            if not sp:
                continue
            st = sp.get('style')
            if not st:
                continue
            fs = re.findall(r'font-size:(\d+)px', st)
            if not fs:
                continue
            fs = [int(fs[0])]
            if len(cur_fs) == 0:
                cur_fs = fs
            if fs == cur_fs:
                # Same font size: keep accumulating text.
                cur_text += sp.text
            elif not sp.find('br') and cur_c == c:
                # New font size inside the same element, no line break: multi-font snippet.
                cur_text += sp.text
                cur_fs.extend(fs)
                multi_fs = True
            elif sp.find('br') and multi_fs:
                # End of a multi-font snippet: flush it and reset the accumulators.
                cur_fs.extend(fs)
                snippets.append((cur_text + sp.text, max(cur_fs), cur_page))
                cur_fs = []
                cur_text = ''
                cur_c = c
                multi_fs = False
            else:
                # Font size changed: flush the current snippet and start a new one.
                snippets.append((cur_text, max(cur_fs), cur_page))
                cur_fs = fs
                cur_text = sp.text
                cur_c = c
                multi_fs = False
    snippets.append((cur_text, max(cur_fs), cur_page))
    return snippets

def get_titles_fs(fs_list):
    # Font sizes strictly larger than the dominant (body-text) size are treated as title sizes.
    filtered_fs_list = [item[0] for item in fs_list if item[0] > fs_list[0][0]]
    return sorted(filtered_fs_list, reverse=True)

def calculate_total_characters(snippets):
    # Sum the number of characters written in each font size so that the
    # dominant (body-text) size can be identified.
    font_sizes = {}

    for text, font_size, _ in snippets:
        cleaned_text = text.replace('\n', '')
        total_characters = len(cleaned_text)

        if font_size in font_sizes:
            font_sizes[font_size] += total_characters
        else:
            font_sizes[font_size] = total_characters

    # Sorted by total character count, most frequent font size first.
    size_charac_list = sorted(font_sizes.items(), key=lambda x: x[1], reverse=True)

    return size_charac_list

def create_documents(source, snippets, font_sizes):
    # Turn font-size-annotated snippets into LangChain Documents, marking
    # larger-than-body font sizes as Markdown-style titles.
    docs = []

    titles_fs = get_titles_fs(font_sizes)

    for snippet in snippets:
        cur_fs = snippet[1]
        if cur_fs > font_sizes[0][0] and len(snippet[0]) > 2:
            # Title: prefix with one to three '#' depending on the font-size rank.
            # The replace() normalizes non-breaking spaces to regular spaces.
            content = min((titles_fs.index(cur_fs) + 1), 3) * "#" + " " + snippet[0].replace("\xa0", " ")
            category = "Title"
        else:
            content = snippet[0].replace("\xa0", " ")
            category = "Paragraph"
        metadata = {
            "source": source,
            "filename": source.split("/")[-1],
            "file_directory": "/".join(source.split("/")[:-1]),
            "file_category": "",
            "file_sub-cat": "",
            "file_sub2-cat": "",
            "category": category,
            "filetype": source.split(".")[-1],
            "page_number": snippet[2],
        }
        # Derive up to three category levels from the directory structure.
        categories = source.split("/")
        cat_update = {}
        if len(categories) > 4:
            cat_update = {"file_category": categories[1], "file_sub-cat": categories[2], "file_sub2-cat": categories[3]}
        elif len(categories) > 3:
            cat_update = {"file_category": categories[1], "file_sub-cat": categories[2]}
        elif len(categories) > 2:
            cat_update = {"file_category": categories[1]}
        metadata.update(cat_update)
        docs.append(Document(page_content=content, metadata=metadata))
    return docs

def group_chunks_by_section(chunks, min_chunk_size=64):
    # Merge title chunks with the paragraphs that follow them so that each
    # resulting chunk corresponds to one document section.
    filtered_chunks = [chunk for chunk in chunks if chunk.metadata['category'] != 'PageBreak']

    new_chunks = []
    seen_paragraph = False
    new_title = True
    for i, chunk in enumerate(filtered_chunks):
        if new_title:
            # Start a new grouped chunk.
            new_chunk = chunk
            new_title = False
            add_content = False
            new_chunk.metadata['titles'] = ""

        if chunk.metadata['category'].lower() == 'title':
            new_chunk.metadata['titles'] += f"{chunk.page_content} ~~ "
        else:
            seen_paragraph = True

        if add_content:
            new_chunk.page_content += f"\n{chunk.page_content}"

            try:
                new_chunk.metadata['end_page'] = chunk.metadata['page_number']
            except KeyError:
                # Some sources (e.g. docx) carry no page_number metadata.
                pass

        add_content = True

        try:
            # Close the current section when the next chunk starts a new title,
            # provided a paragraph has been seen and enough content accumulated.
            if filtered_chunks[i + 1].metadata['category'].lower() == "title" and seen_paragraph and len(new_chunk.page_content) > min_chunk_size:
                if 'category' in new_chunk.metadata:
                    new_chunk.metadata.pop('category')
                new_chunks.append(new_chunk)
                new_title = True
                seen_paragraph = False
        except IndexError:
            # Last chunk: flush whatever has been accumulated.
            new_chunks.append(new_chunk)
            break
    return new_chunks

def split_pdf(file_path, folder):
    # Load the PDF as HTML via PDFMiner, then group the text by font size to
    # recover titles and paragraphs.
    loader = PDFMinerPDFasHTMLLoader(file_path)

    data = loader.load()[0]
    soup = BeautifulSoup(data.page_content, 'html.parser')
    content = soup.find_all('div')
    try:
        snippets = group_text_by_font_size(content)
    except Exception as e:
        print("ERROR WHILE GROUPING BY FONT SIZE", e)
        snippets = [("ERROR WHILE GROUPING BY FONT SIZE", 0, -1)]
    font_sizes = calculate_total_characters(snippets)
    chunks = create_documents(file_path, snippets, font_sizes)
    return chunks

def split_docx(file_path, folder):
    # Partition the .docx with unstructured and convert its elements into
    # LangChain Documents, marking titles with Markdown-style '#' prefixes.
    chunks_elms = partition_docx(filename=file_path)
    chunks = []
    file_categories = file_path.split("/")
    for chunk_elm in chunks_elms:
        category = chunk_elm.category
        if category == "Title":
            chunk = Document(
                page_content=min(chunk_elm.metadata.to_dict()['category_depth'] + 1, 3) * "#" + ' ' + chunk_elm.text,
                metadata=chunk_elm.metadata.to_dict(),
            )
        else:
            chunk = Document(page_content=chunk_elm.text, metadata=chunk_elm.metadata.to_dict())
        metadata = {
            "source": file_path,
            "filename": file_path.split("/")[-1],
            "file_category": "",
            "file_sub-cat": "",
            "file_sub2-cat": "",
            "category": category,
            "filetype": file_path.split(".")[-1],
        }
        cat_update = {}
        if len(file_categories) > 4:
            cat_update = {"file_category": file_categories[1], "file_sub-cat": file_categories[2], "file_sub2-cat": file_categories[3]}
        elif len(file_categories) > 3:
            cat_update = {"file_category": file_categories[1], "file_sub-cat": file_categories[2]}
        elif len(file_categories) > 2:
            cat_update = {"file_category": file_categories[1]}
        metadata.update(cat_update)
        chunk.metadata.update(metadata)
        chunks.append(chunk)
    return chunks

def rebuild_index(input_folder, output_folder):
    # NOTE: paths_time is expected to hold (source, last_modified) pairs for the
    # files currently present in input_folder; it is left empty here, so every
    # previously indexed vector is treated as stale.
    paths_time = []
    to_keep = set()
    print(f'number of files {len(paths_time)}')
    if len(output_folder.list_paths_in_partition()) > 0:
        # Download the stored FAISS index into a temporary directory and load it.
        with tempfile.TemporaryDirectory() as temp_dir:
            for f in output_folder.list_paths_in_partition():
                with output_folder.get_download_stream(f) as stream:
                    with open(os.path.join(temp_dir, os.path.basename(f)), "wb") as f2:
                        f2.write(stream.read())
            index = FAISS.load_local(temp_dir, embeddings)
            to_remove = []
            logging.info(f"{len(index.docstore._dict)} vectors loaded")
            for idx, doc in index.docstore._dict.items():
                source = (doc.metadata["source"], doc.metadata["last_modified"])
                if source in paths_time:
                    # Still present and unchanged: keep it.
                    to_keep.add(source)
                else:
                    # Removed or modified since the last indexing run.
                    to_remove.append(idx)

            docstore_id_to_index = {v: k for k, v in index.index_to_docstore_id.items()}

            # Drop stale vectors from the docstore, the id mapping and the FAISS index itself.
            vectors_to_remove = []
            for idx in to_remove:
                del index.docstore._dict[idx]
                ind = docstore_id_to_index[idx]
                del index.index_to_docstore_id[ind]
                vectors_to_remove.append(ind)
            index.index.remove_ids(np.array(vectors_to_remove, dtype=np.int64))

            # Re-number the remaining vectors so the ids stay contiguous.
            index.index_to_docstore_id = {
                i: ind
                for i, ind in enumerate(index.index_to_docstore_id.values())
            }
            logging.info(f"{len(to_remove)} vectors removed")
    else:
        index = None
    to_add = [path[0] for path in paths_time if path not in to_keep]
    print(f'to_keep: {to_keep}')
    print(f'to_add: {to_add}')
    return index, to_add

def split_chunks_by_tokens(documents, max_length=170, overlap=10):
    # Re-split documents whose token count exceeds max_length into overlapping
    # windows of at most max_length tokens.
    resized = []

    for doc in documents:
        encoded = tokenizer.encode(doc.page_content)
        if len(encoded) > max_length:
            remaining_encoded = tokenizer.encode(doc.page_content)
            while len(remaining_encoded) > 0:
                split_doc = Document(page_content=tokenizer.decode(remaining_encoded[:max(10, max_length)]), metadata=doc.metadata.copy())
                resized.append(split_doc)
                remaining_encoded = remaining_encoded[max(10, max_length - overlap):]
        else:
            resized.append(doc)
    print(f"Number of chunks before resplitting: {len(documents)} \nAfter splitting: {len(resized)}")
    return resized

def split_chunks_by_tokens_period(documents, max_length=170, overlap=10, min_chunk_size=20):
    # Re-split documents into chunks of at most max_length tokens, trying to cut
    # on sentence boundaries ('.') inside the overlap regions, and merging very
    # small trailing chunks into their predecessor.
    resized = []
    previous_file = ""
    to_encode = ""
    skip_next = False

    for i, doc in enumerate(documents):
        if skip_next:
            # This document was already merged into the previous one.
            skip_next = False
            continue
        current_file = doc.metadata['source']
        if current_file != previous_file:
            # New source file: reset the per-file counters.
            previous_file = current_file
            chunk_counter = 0
            is_first_chunk = True
        to_encode += doc.page_content

        try:
            # If the next document is the last one of this file and is very short,
            # merge it into the current chunk and skip it on the next iteration.
            if (documents[i + 1] is documents[-1] or documents[i + 1].metadata['source'] != documents[i + 2].metadata['source']) and len(tokenizer.encode(documents[i + 1].page_content)) < min_chunk_size:
                skip_next = True
                to_encode += documents[i + 1].page_content
        except Exception as e:
            print(e)

        encoded = tokenizer.encode(to_encode)
        if len(encoded) < min_chunk_size and not skip_next:
            # Too small on its own: keep accumulating with the next document.
            continue
        elif skip_next:
            split_doc = Document(page_content=tokenizer.decode(encoded).replace('<s> ', ''), metadata=doc.metadata.copy())
            split_doc.metadata['token_length'] = len(tokenizer.encode(split_doc.page_content))
            resized.append(split_doc)
            to_encode = ""
            continue
        else:
            to_encode = ""

        if len(encoded) > max_length:
            remaining_encoded = encoded
            is_last_chunk = False
            while len(remaining_encoded) > 1 and not is_last_chunk:
                # Look for a sentence end at the beginning of the window (overlap with the previous chunk).
                overlap_text = tokenizer.decode(remaining_encoded[:overlap])
                period_index_b = overlap_text.find('.')
                if len(remaining_encoded) > max_length + min_chunk_size:
                    current_encoded = remaining_encoded[:max(10, max_length)]
                else:
                    # Remainder is small enough: emit it whole and stop.
                    current_encoded = remaining_encoded
                    is_last_chunk = True
                    split_doc = Document(page_content=tokenizer.decode(current_encoded).replace('<s> ', ''), metadata=doc.metadata.copy())
                    split_doc.metadata['token_length'] = len(tokenizer.encode(split_doc.page_content))
                    resized.append(split_doc)
                    break

                # Look for a sentence end at the end of the window.
                period_index_e = -1
                if len(remaining_encoded) > max_length + min_chunk_size:
                    overlap_text_last = tokenizer.decode(current_encoded[-overlap:])
                    period_index_last = overlap_text_last.find('.')
                    if period_index_last != -1 and period_index_last < len(overlap_text_last) - 1:
                        period_index_e = period_index_last - len(overlap_text_last)

                # Trim the decoded window to the detected sentence boundaries.
                if not is_first_chunk:
                    if period_index_b == -1:
                        split_doc = Document(page_content=tokenizer.decode(current_encoded)[:period_index_e].replace('<s> ', ''), metadata=doc.metadata.copy())
                    else:
                        if is_last_chunk:
                            split_doc = Document(page_content=tokenizer.decode(current_encoded)[period_index_b + 1:].replace('<s> ', ''), metadata=doc.metadata.copy())
                        else:
                            split_doc = Document(page_content=tokenizer.decode(current_encoded)[period_index_b + 1:period_index_e].replace('<s> ', ''), metadata=doc.metadata.copy())
                else:
                    split_doc = Document(page_content=tokenizer.decode(current_encoded)[:period_index_e].replace('<s> ', ''), metadata=doc.metadata.copy())

                if 'titles' in split_doc.metadata:
                    chunk_counter += 1
                    split_doc.metadata['chunk_id'] = chunk_counter

                split_doc.metadata['token_length'] = len(tokenizer.encode(split_doc.page_content))
                resized.append(split_doc)
                print(f"Added a document of {split_doc.metadata['token_length']} tokens")
                remaining_encoded = remaining_encoded[max(10, max_length - overlap):]
                is_first_chunk = False

        else:
            print(f"Found a chunk with the perfect size: {len(encoded)}")
            if 'titles' in doc.metadata:
                chunk_counter += 1
                doc.metadata['chunk_id'] = chunk_counter
            doc.metadata['token_length'] = len(encoded)
            doc.page_content = tokenizer.decode(encoded).replace('<s> ', '')
            resized.append(doc)
            print(f"Added a document of {doc.metadata['token_length']} tokens")
    print(f"Number of chunks before resplitting: {len(documents)} \nAfter splitting: {len(resized)}")
    return resized

def split_doc_in_chunks(input_folder, base_folders):
    # Split every file in input_folder into section chunks, dispatching on the
    # file extension (.pdf, .docx or .doc).
    docs = []
    for i, filename in enumerate(input_folder):
        path = filename
        print(f"Treating file {i}/{len(input_folder)}")

        chunks = []
        if path.endswith(".pdf"):
            try:
                print("Treatment of pdf file", path)
                raw_chunks = split_pdf(path, input_folder)
                for raw_chunk in raw_chunks:
                    raw_chunk.metadata["Base Folder"] = base_folders[i]
                chunks = group_chunks_by_section(raw_chunks)
                print(f"Document split into {len(chunks)} chunks")
            except Exception as e:
                print("Error while splitting the pdf file: ", e)
        elif path.endswith(".docx"):
            try:
                print("Treatment of docx file", path)
                raw_chunks = split_docx(path, input_folder)
                for raw_chunk in raw_chunks:
                    raw_chunk.metadata["Base Folder"] = base_folders[i]
                chunks = group_chunks_by_section(raw_chunks)
                print(f"Document split into {len(chunks)} chunks")
            except Exception as e:
                print("Error while splitting the docx file: ", e)
        elif path.endswith(".doc"):
            try:
                loader = UnstructuredFileLoader(path)
                chunks = loader.load_and_split(text_splitter=text_splitter)
                counter, counter2 = collections.Counter(), collections.Counter()
                filename = os.path.basename(path)

                for chunk in chunks:
                    chunk.metadata["filename"] = filename.split("/")[-1]
                    chunk.metadata["file_directory"] = filename.split("/")[:-1]
                    chunk.metadata["filetype"] = filename.split(".")[-1]
                    chunk.metadata["Base Folder"] = base_folders[i]
                    if "page" in chunk.metadata:
                        counter[chunk.metadata['page']] += 1
                        # Use a separate loop variable so the outer file index i is not shadowed.
                        for j in range(len(chunks)):
                            counter2[chunks[j].metadata['page']] += 1
                            chunks[j].metadata['source'] = filename
                    else:
                        if len(chunks) == 1:
                            chunks[0].metadata['source'] = filename
            except Exception as e:
                print(f"An error occurred: {e}")
        try:
            if len(chunks) > 0:
                docs += chunks
        except NameError as e:
            print(f"An error has occurred: {e}")
    return docs

def resplit_by_end_of_sentence(docs, max_len, overlap, min_len):
    print("❌❌\nResplitting docs by end of sentence\n❌❌")
    resized_docs = split_chunks_by_tokens_period(docs, max_len, overlap, min_len)
    try:
        cur_source = ""
        cpt_chunk = 1
        for resized_doc in resized_docs:
            try:
                # Prepend the closest section title when it is not already in the text.
                title = resized_doc.metadata['titles'].split(' ~~ ')[-2]
                if title not in resized_doc.page_content:
                    resized_doc.page_content = title + "\n" + resized_doc.page_content
                if cur_source == resized_doc.metadata["source"]:
                    resized_doc.metadata['chunk_number'] = cpt_chunk
                else:
                    # New source file: restart the chunk numbering.
                    cpt_chunk = 1
                    cur_source = resized_doc.metadata["source"]
                    resized_doc.metadata['chunk_number'] = cpt_chunk
            except Exception as e:
                print("An error occurred: ", e)
            cpt_chunk += 1
    except Exception as e:
        print('AN ERROR OCCURRED: ', e)
    return resized_docs

def build_index(docs, index, output_folder):
    # Embed the documents, merge them into the existing FAISS index (if any)
    # and upload the resulting index files to output_folder.
    if len(docs) > 0:
        if index is not None:
            new_index = FAISS.from_documents(docs, embeddings)
            index.merge_from(new_index)
        else:
            index = FAISS.from_documents(docs, embeddings)
        with tempfile.TemporaryDirectory() as temp_dir:
            index.save_local(temp_dir)
            for f in os.listdir(temp_dir):
                output_folder.upload_file(f, os.path.join(temp_dir, f))

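# Illustrative usage sketch (not part of the original code): rebuild_index() and
# build_index() only assume folder handles exposing list_paths_in_partition(),
# get_download_stream() and upload_file(), as provided e.g. by Dataiku managed
# folders. The folder names below are hypothetical.
#
#   import dataiku
#   docs_folder = dataiku.Folder("documents")
#   index_folder = dataiku.Folder("faiss_index")
#   index, to_add = rebuild_index(docs_folder, index_folder)
#   chunks = split_doc_in_chunks(to_add, base_folders=[""] * len(to_add))
#   build_index(resplit_by_end_of_sentence(chunks, 1000, 100, 20), index, index_folder)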

def extract_zip(zip_path):
    # Extract the archive into the current working directory and return the
    # list of extracted member names.
    extracted_files = []
    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
        for file_info in zip_ref.infolist():
            extracted_files.append(file_info.filename)
            zip_ref.extract(file_info.filename)
    return extracted_files

def split_in_df(files):
    # "Intelligent split": split the uploaded files into section chunks,
    # re-split them on sentence boundaries and export the result to Excel.
    processed_files = []
    base_folders = []
    print("Processing zip files...")
    for file_path in files:
        if file_path.endswith('.zip'):
            extracted_files = extract_zip(file_path)
            processed_files.extend(extracted_files)
            base_folders.extend([os.path.splitext(os.path.basename(file_path))[0]] * len(extracted_files))
        else:
            processed_files.append(file_path)
            base_folders.append("")
    print(f"Base folders list: {base_folders}")
    print("Finished processing zip files\nSplitting files into chunks...")
    documents = split_doc_in_chunks(processed_files, base_folders)
    # Arguments are (docs, max_len, overlap, min_len).
    re_docs = resplit_by_end_of_sentence(documents, 1000, 100, 1500)
    print("Finished splitting")
    df = pd.DataFrame()
    for re_doc in re_docs:
        filename = re_doc.metadata['filename']
        content = re_doc.page_content

        doc_data = {'Filename': filename, 'Content': content}
        doc_data["Token_Length"] = re_doc.metadata['token_length']
        doc_data["Titles"] = re_doc.metadata['titles'] if 'titles' in re_doc.metadata else ""
        doc_data["Base Folder"] = re_doc.metadata["Base Folder"]

        df = pd.concat([df, pd.DataFrame([doc_data])], ignore_index=True)

    df.to_excel("dataframe.xlsx", index=False)

    return "dataframe.xlsx"

def split_by_keywords(files, key_words, words_limit=1000):
    # "Intelligent split by keywords": for every occurrence of a keyword, collect
    # the surrounding lines (up to words_limit) before and after the match,
    # crossing page boundaries when needed, and export the excerpts to Excel.
    processed_files = []
    extracted_content = []
    tabLine = []

    try:
        for f in files:
            # Skip files that were already collected under another extension.
            not_duplicate = True
            for p in processed_files:
                if f[:f.rfind('.')] == p[:p.rfind('.')]:
                    not_duplicate = False
            if not_duplicate:
                if f.endswith('.zip'):
                    extracted_files = extract_zip(f)
                    print(f"Extracted files: {extracted_files}")

                    for doc in extracted_files:
                        if doc.endswith('.doc') or doc.endswith('.docx'):
                            processed_files.append(transform_to_pdf(doc))
                        if doc.endswith('.pdf'):
                            processed_files.append(doc)

                if f.endswith('.pdf'):
                    processed_files.append(f)

                if f.endswith('.doc') or f.endswith('.docx'):
                    processed_files.append(transform_to_pdf(f))

    except Exception as ex:
        print(f"Error occurred while processing files: {ex}")

    for file in processed_files:
        try:
            file_name = file
            file = PdfReader(file)
            pdfNumberPages = len(file.pages)
            for pdfPage in range(0, pdfNumberPages):
                load_page = file.get_page(pdfPage)
                text = load_page.extract_text()
                lines = text.split("\n")
                sizeOfLines = len(lines) - 1

                for index, line in enumerate(lines):
                    print(line)
                    for key in key_words:
                        if key in line:
                            print("Found keyword")
                            # Walk backwards from the match, loading previous pages if
                            # needed, until words_limit characters have been collected.
                            lineBool = True
                            lineIndex = index
                            previousSelectedLines = []
                            stringLength = 0
                            linesForSelection = lines
                            loadOnce = True
                            selectedPdfPage = pdfPage

                            while lineBool:
                                print(lineIndex)
                                if stringLength > words_limit or lineIndex < 0:
                                    lineBool = False
                                else:
                                    if lineIndex == 0:
                                        print("Line index == 0")
                                        if pdfPage == 0:
                                            lineBool = False
                                        else:
                                            try:
                                                selectedPdfPage -= 1
                                                newLoad_page = file.get_page(selectedPdfPage)
                                                newText = newLoad_page.extract_text()
                                                newLines = newText.split("\n")
                                                linesForSelection = newLines
                                                print(f"len newLines {len(newLines)}")
                                                lineIndex = len(newLines) - 1
                                            except Exception as e:
                                                print("Loading previous PDF page failed")
                                                lineBool = False

                                    previousSelectedLines.append(linesForSelection[lineIndex])
                                    stringLength += len(linesForSelection[lineIndex])
                                    lineIndex -= 1
                            previousSelectedLines = ' '.join(previousSelectedLines[::-1])

                            # Walk forwards from the match, loading next pages if needed,
                            # until words_limit words have been collected.
                            lineBool = True
                            lineIndex = index + 1
                            nextSelectedLines = ""
                            linesForSelection = lines
                            loadOnce = True
                            selectedPdfPage = pdfPage

                            while lineBool:
                                if len(nextSelectedLines.split()) > words_limit:
                                    lineBool = False
                                else:
                                    if lineIndex > sizeOfLines:
                                        lineBool = False
                                        if pdfPage == pdfNumberPages - 1:
                                            lineBool = False
                                        else:
                                            try:
                                                selectedPdfPage += 1
                                                newLoad_page = file.get_page(selectedPdfPage)
                                                newText = newLoad_page.extract_text()
                                                newLines = newText.split("\n")
                                                linesForSelection = newLines
                                                lineIndex = 0
                                            except Exception as e:
                                                print("Loading next PDF page failed")
                                                lineBool = False
                                    else:
                                        nextSelectedLines += " " + linesForSelection[lineIndex]
                                        lineIndex += 1

                            print(f"Previous Lines : {previousSelectedLines}")
                            print(f"Next Lines : {nextSelectedLines}")
                            selectedText = previousSelectedLines + ' ' + nextSelectedLines
                            print(selectedText)
                            tabLine.append([file_name, selectedText, key])
                            print(f"Selected line in keywords is: {line}")

        except Exception as ex:
            print(f"Error occurred while extracting content: {ex}")

    for r in tabLine:
        text_joined = ''.join(r[1])
        text_joined = r[2] + " : \n " + text_joined
        extracted_content.append([r[0], text_joined])

    df = pd.DataFrame()
    for content in extracted_content:
        filename = content[0]
        text = content[1]

        doc_data = {'Filename': filename[filename.rfind("/") + 1:], 'Content': text}

        df = pd.concat([df, pd.DataFrame([doc_data])], ignore_index=True)

    df.to_excel("dataframe_keywords.xlsx", index=False)

    return "dataframe_keywords.xlsx"

def transform_to_pdf(doc):
    # Convert a .doc/.docx file to PDF through the PSPDFKit Build API and return
    # the path of the generated PDF, or None if the conversion failed.
    instructions = {'parts': [{'file': 'document'}]}

    response = requests.request(
        'POST',
        'https://api.pspdfkit.com/build',
        # The API key is read from the environment rather than hard-coded.
        headers={'Authorization': f"Bearer {os.getenv('PSPDFKIT_API_KEY', '')}"},
        files={'document': open(doc, 'rb')},
        data={'instructions': json.dumps(instructions)},
        stream=True,
    )

    pdf_name = doc[:doc.find(".doc")] + ".pdf"

    if response.ok:
        with open(pdf_name, 'wb') as fd:
            for chunk in response.iter_content(chunk_size=8096):
                fd.write(chunk)
        return pdf_name
    else:
        print(response.text)
        return None

def non_intelligent_split(files, chunk_size=1000):
    # "Non intelligent split": convert everything to PDF, then cut the extracted
    # text into fixed-size chunks of roughly chunk_size characters, page by page.
    extracted_content = []
    processed_files = []

    try:
        for f in files:
            # Skip files that were already collected under another extension.
            not_duplicate = True
            for p in processed_files:
                if f[:f.rfind('.')] == p[:p.rfind('.')]:
                    not_duplicate = False
            if not_duplicate:
                if f.endswith('.zip'):
                    extracted_files = extract_zip(f)
                    print(f"Extracted files: {extracted_files}")

                    for doc in extracted_files:
                        if doc.endswith('.doc') or doc.endswith('.docx'):
                            processed_files.append(transform_to_pdf(doc))
                        if doc.endswith('.pdf'):
                            processed_files.append(doc)

                if f.endswith('.pdf'):
                    processed_files.append(f)

                if f.endswith('.doc') or f.endswith('.docx'):
                    processed_files.append(transform_to_pdf(f))

    except Exception as ex:
        print(f"Error occurred while processing files: {ex}")

    try:
        for f in processed_files:
            print(f"my filename is: {f}")
            file = PdfReader(f)
            pdfNumberPages = len(file.pages)
            selectedText = ""

            for pdfPage in range(0, pdfNumberPages):
                load_page = file.get_page(pdfPage)
                text = load_page.extract_text()
                lines = text.split("\n")
                sizeOfLines = 0

                for index, line in enumerate(lines):
                    sizeOfLines += len(line)
                    selectedText += " " + line
                    if sizeOfLines >= chunk_size:
                        textContent = f"Page {str(pdfPage)} : {selectedText}"
                        extracted_content.append([f, textContent])
                        sizeOfLines = 0
                        selectedText = ""

            # Flush whatever text remains after the last full chunk.
            textContent = f"Page {str(pdfNumberPages)} : {selectedText}"
            extracted_content.append([f, textContent])
    except Exception as ex:
        print(f"Error occurred while extracting content from processed files: {ex}")

    df = pd.DataFrame()
    for content in extracted_content:
        filename = content[0]
        text = content[1]

        doc_data = {'Filename': filename[filename.rfind("/") + 1:], 'Content': text}

        df = pd.concat([df, pd.DataFrame([doc_data])], ignore_index=True)

    df.to_excel("dataframe_keywords.xlsx", index=False)

    return "dataframe_keywords.xlsx"