import streamlit as st

st.set_page_config(layout="wide")

import os
import uuid
from pathlib import Path

import fitz
import pandas as pd
import chromadb
import spacy
from annotated_text import annotated_text, annotation
from langchain.retrievers import BM25Retriever, EnsembleRetriever
from langchain.schema import Document
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from setfit import SetFitModel

st.title("Contracts Classification")

# Download the clause-classification model from the 🤗 Hub
clause_model = SetFitModel.from_pretrained("scholarly360/setfit-contracts-clauses")

# Load the English model from spaCy
nlp = spacy.load("en_core_web_md")
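# Note: "en_core_web_md" must already be available in the environment; if it
# is missing, it can be installed with `python -m spacy download en_core_web_md`.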
def split_into_sentences_with_offsets(text):
    """
    Splits a paragraph into sentences and returns them along with their
    start and end character offsets.

    :param text: The input text to be split into sentences.
    :return: A list of tuples, each containing a sentence and its start and end offsets.
    """
    doc = nlp(text)
    return [(sent.text, sent.start_char, sent.end_char) for sent in doc.sents]
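# Illustrative example (exact boundaries depend on the loaded spaCy model);
# offsets are character positions into the input, with the end exclusive:
#   split_into_sentences_with_offsets("First sentence. Second one.")
#   -> [("First sentence.", 0, 15), ("Second one.", 16, 27)]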
def util_upload_file_and_return_list_docs(uploaded_files):
    """Save each uploaded file to the working directory and open it with PyMuPDF."""
    list_docs = []
    list_save_path = []
    for uploaded_file in uploaded_files:
        # Persist the upload to disk so PyMuPDF can open it by path.
        save_path = Path(os.getcwd(), uploaded_file.name)
        with open(save_path, mode='wb') as w:
            w.write(uploaded_file.getvalue())
        docs = fitz.open(save_path)
        list_docs.append(docs)
        list_save_path.append(save_path)
    return (list_docs, list_save_path)
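# Each entry in list_docs is a PyMuPDF Document; iterating it yields Page
# objects, and page.get_text() returns the page's plain text. For example
# (with a hypothetical local file):
#   docs = fitz.open("contract.pdf")
#   first_page_text = docs[0].get_text()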
#### Helper function to split text using a rolling window (recommended: use a smaller window)
def split_txt_file_synthetic_sentence_rolling(ctxt, sentence_size_in_chars, sliding_size_in_chars, debug=False):
    # Guard: the sliding step must not exceed the window size.
    if sentence_size_in_chars < sliding_size_in_chars:
        return None
    # Convert the step size into the offset subtracted from each window's end.
    sliding_size_in_chars = sentence_size_in_chars - sliding_size_in_chars
    pos_start = 0
    pos_end = len(ctxt)
    final_return = []
    if debug:
        print('pos_start : ', pos_start)
        print('pos_end : ', pos_end)
    # Text shorter than one window: return it as a single section.
    if pos_end < sentence_size_in_chars:
        return [{'section_org_text': ctxt[pos_start:pos_end],
                 'section_char_start': pos_start,
                 'section_char_end': pos_end}]
    #### First window
    start = pos_start
    end = start + sentence_size_in_chars
    final_return.append({'section_org_text': ctxt[start:end],
                         'section_char_start': start,
                         'section_char_end': end})
    #### Subsequent windows: slide forward until the end of the text is reached
    stop_condition = False
    while not stop_condition:
        start = end - sliding_size_in_chars
        end = start + sentence_size_in_chars
        if end > pos_end:
            # The window overshoots the text; emit a final, truncated section
            # if any characters remain, then stop.
            if start < pos_end:
                end = pos_end
                final_return.append({'section_org_text': ctxt[start:end],
                                     'section_char_start': start,
                                     'section_char_end': end})
            stop_condition = True
        else:
            final_return.append({'section_org_text': ctxt[start:end],
                                 'section_char_start': start,
                                 'section_char_end': end})
            if debug:
                print('start : ', start)
                print('end : ', end)
    return final_return
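# Illustrative example: a 10-character text with a 4-character window and a
# step of 2 yields overlapping sections:
#   split_txt_file_synthetic_sentence_rolling("abcdefghij", 4, 2)
#   -> [{'section_org_text': 'abcd', 'section_char_start': 0, 'section_char_end': 4},
#       {'section_org_text': 'cdef', 'section_char_start': 2, 'section_char_end': 6},
#       {'section_org_text': 'efgh', 'section_char_start': 4, 'section_char_end': 8},
#       {'section_org_text': 'ghij', 'section_char_start': 6, 'section_char_end': 10},
#       {'section_org_text': 'ij',   'section_char_start': 8, 'section_char_end': 10}]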
def util_get_list_page_and_passage(list_docs, list_save_path):
    passage_documents = []
    for ind_doc, docs in enumerate(list_docs):
        for txt_index, txt_page in enumerate(docs):
            page_document = txt_page.get_text()  # plain text (UTF-8)
            sections = split_into_sentences_with_offsets(page_document)
            for sub_sub_index, sub_sub_item in enumerate(sections):
                sub_text = sub_sub_item[0]
                passage_document = Document(
                    page_content=sub_text,
                    metadata={
                        "page_content": page_document,
                        "page_index": txt_index,
                        "file_name": str(list_save_path[ind_doc]),
                    },
                )
                passage_documents.append(passage_document)
    return passage_documents
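# Each passage Document carries one sentence as page_content, plus metadata
# recording the full page text, the page index, and the source file path.
# Illustrative shape (values are hypothetical):
#   Document(page_content="Payment is due within 30 days.",
#            metadata={"page_content": "...", "page_index": 0,
#                      "file_name": "/app/GCC-LPS.pdf"})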
# def util_index_chromadb_passages():
#     ##### PROCESSING
#     # create client and a new collection
#     collection_name = str(uuid.uuid4().hex)
#     chroma_client = chromadb.EphemeralClient()
#     chroma_collection = chroma_client.get_or_create_collection(collection_name)
#     # define embedding function
#     embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name="BAAI/bge-small-en"))
#     vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
#     return (chroma_client, chroma_collection, collection_name, vector_store, embed_model)
def util_get_only_content_inside_loop(page_no, page_documents):
    """Return the content of the first passage whose page index matches page_no."""
    for item in page_documents:
        if item.metadata['page_index'] == page_no:
            return item.page_content
    return None
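# Illustrative usage (assuming passage_documents was built by
# util_get_list_page_and_passage): fetch the first passage on page 0:
#   first_passage = util_get_only_content_inside_loop(0, passage_documents)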
passage_documents = []

with st.form("my_form"):
    multi = '''1. Download and upload multiple contracts,
e.g. https://www.barc.gov.in/tenders/GCC-LPS.pdf
e.g. https://www.montrosecounty.net/DocumentCenter/View/823/Sample-Construction-Contract
'''
    st.markdown(multi)
    multi = '''2. Press Calculate.'''
    st.markdown(multi)
    multi = '''
**Classification is attempted sentence-wise.**
'''
    st.markdown(multi)
    list_docs = []
    list_save_path = []
    uploaded_files = st.file_uploader("Choose file(s)", accept_multiple_files=True)
    submitted = st.form_submit_button("Calculate")
my_list_structure = []

if submitted and uploaded_files:
    list_docs, list_save_path = util_upload_file_and_return_list_docs(uploaded_files)
    passage_documents = util_get_list_page_and_passage(list_docs, list_save_path)
    # Classify each sentence-level passage with the SetFit clause model.
    for passage_document in passage_documents:
        text = passage_document.page_content
        metadata = passage_document.metadata
        preds = clause_model(text)
        my_list_structure.append({"text": text, "metadata": metadata, "preds": preds})
    df = pd.DataFrame(my_list_structure)
    st.dataframe(df)
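# Note: clause_model(text) returns the predicted clause label for a single
# sentence; passing a list of sentences returns one prediction per sentence.
# Illustrative call (actual labels depend on the scholarly360 model):
#   clause_model("Payment shall be made within thirty days.")  # -> e.g. "payments"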