# set path
import glob, os, sys;
sys.path.append('../utils')
import streamlit as st
import json
import logging
from utils.lexical_search import runLexicalPreprocessingPipeline, lexical_search
from utils.semantic_search import runSemanticPreprocessingPipeline, semantic_search
from utils.checkconfig import getconfig
# Load all search parameters once at import time from paramconfig.cfg.
config = getconfig('paramconfig.cfg')

def _semantic_param(key):
    """Return the raw string for *key* from the [semantic_search] section."""
    return config.get('semantic_search', key)

# Document splitting parameters for preprocessing.
split_by = _semantic_param('SPLIT_BY')
split_length = int(_semantic_param('SPLIT_LENGTH'))
split_overlap = int(_semantic_param('SPLIT_OVERLAP'))
split_respect_sentence_boundary = bool(int(_semantic_param('RESPECT_SENTENCE_BOUNDARY')))
remove_punc = bool(int(_semantic_param('REMOVE_PUNC')))
# Retriever / reader model configuration for semantic search.
embedding_model = _semantic_param('RETRIEVER')
embedding_model_format = _semantic_param('RETRIEVER_FORMAT')
embedding_layer = int(_semantic_param('RETRIEVER_EMB_LAYER'))
retriever_top_k = int(_semantic_param('RETRIEVER_TOP_K'))
reader_model = _semantic_param('READER')
# NOTE(review): this reads RETRIEVER_TOP_K, not READER_TOP_K — looks like a
# copy-paste slip, but preserved as-is; confirm against paramconfig.cfg
# before changing the key.
reader_top_k = int(_semantic_param('RETRIEVER_TOP_K'))
def app():
    """Render the keyword/semantic search page.

    Lets the user pick a sample keyword category (or type a free-form
    query) and runs a contextual (semantic) search over the document
    previously uploaded into ``st.session_state``. The exact-match
    (lexical) path is currently disabled.

    Side effects: reads ``docStore/sample/keywordexample.json``, reads
    ``st.session_state['filepath'/'filename']``, writes Streamlit UI
    elements, and emits log records. Returns ``None``.
    """
    with st.container():
        st.markdown("<h1 style='text-align: center; \
                      color: black;'> Search</h1>",
                      unsafe_allow_html=True)
        st.write(' ')
        st.write(' ')

    with st.expander("ℹ️ - About this app", expanded=False):
        st.write(
            """
            The *Keyword Search* app is an easy-to-use interface \
            built in Streamlit for doing keyword search in \
            policy document - developed by GIZ Data and the \
            Sustainable Development Solution Network.
            """)
        st.markdown("")

    with st.sidebar:
        # Sample keyword sets shipped with the app, grouped by category.
        with open('docStore/sample/keywordexample.json', 'r') as json_file:
            keywordexample = json.load(json_file)

        genre = st.radio("Select Keyword Category", list(keywordexample.keys()))
        # Dict lookup replaces the former if/elif chain over category names;
        # an unknown category falls back to None (free-form input below).
        keywordList = keywordexample.get(genre)

        searchtype = st.selectbox("Do you want to find exact matches or similar \
                                  meaning/context",
                                  ['Exact Matches', 'Similar context/meaning'])
        st.markdown("---")

    with st.container():
        if keywordList is not None:
            queryList = st.text_input(f"You selected the {genre} category we \
                                      will look for these keywords in document",
                                      value=f"{keywordList}")
        else:
            queryList = st.text_input("Please enter here your question and we \
                                      will look for an answer in the document \
                                      OR enter the keyword you are looking \
                                      for and we will look for similar \
                                      context in the document.",
                                      placeholder="Enter keyword here")

        if st.button("Find them"):
            # Guard chain (flat) instead of the original nested if/else.
            if queryList == "":
                st.info("🤔 No keyword provided, if you don't have any, \
                        please try example sets from sidebar!")
                logging.warning("Terminated as no keyword provided")
            elif 'filepath' not in st.session_state:
                st.info("🤔 No document found, please try to upload it at the sidebar!")
                logging.warning("Terminated as no document provided")
            elif searchtype == 'Exact Matches':
                # Lexical (TF-IDF) search is currently disabled: the
                # runLexicalPreprocessingPipeline / lexical_search call that
                # used to live here was commented out upstream.
                pass
            else:
                allDocuments = runSemanticPreprocessingPipeline(
                    file_path=st.session_state['filepath'],
                    file_name=st.session_state['filename'],
                    split_by=split_by,
                    split_length=split_length,
                    split_overlap=split_overlap,
                    removePunc=remove_punc,
                    split_respect_sentence_boundary=split_respect_sentence_boundary)
                logging.info("starting semantic search")
                with st.spinner("Performing Similar/Contextual search"):
                    semantic_search(query=queryList,
                                    documents=allDocuments['documents'],
                                    embedding_model=embedding_model,
                                    embedding_layer=embedding_layer,
                                    embedding_model_format=embedding_model_format,
                                    reader_model=reader_model,
                                    reader_top_k=reader_top_k,
                                    retriever_top_k=retriever_top_k)
|