# tafsir_alsidi / app.py
from sentence_transformers import SentenceTransformer, CrossEncoder, util
import torch
import pickle
import pandas as pd
import gradio as gr
# bi_encoder = SentenceTransformer("microsoft/Multilingual-MiniLM-L12-H384")
# Cross-encoder used to re-rank the candidates retrieved by the bi-encoder
cross_encoder = CrossEncoder("cross-encoder/mmarco-mMiniLMv2-L12-H384-v1")
# Load the Qur'an text and the matching tafsir text (one line per verse,
# aligned so that line i of the tafsir explains line i of the Qur'an)
with open("quran-simple-clean.txt", "r", encoding="utf-8") as f:
    quran = f.read().split("\n")
with open("tafsir-simple-clean.txt", "r", encoding="utf-8") as f:
    corpus = f.read().split("\n")
# Bi-encoder used to embed both the tafsir corpus and incoming queries
embedder = SentenceTransformer('symanto/sn-xlm-roberta-base-snli-mnli-anli-xnli')
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
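# Note: `pickle` is imported above but never used here. A minimal sketch of how it could
# cache the corpus embeddings between restarts is shown below; the cache filename
# "corpus_embeddings.pkl" is an assumption, not part of the original app:
#
# import os
# if os.path.exists("corpus_embeddings.pkl"):
#     with open("corpus_embeddings.pkl", "rb") as f:
#         corpus_embeddings = pickle.load(f)
# else:
#     corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
#     with open("corpus_embeddings.pkl", "wb") as f:
#         pickle.dump(corpus_embeddings, f)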
def search(query, top_k=100):
    print("New query:")
    print(query)
    ans = []

    ##### Semantic Search #####
    # Encode the query with the bi-encoder and retrieve potentially relevant tafsir passages
    question_embedding = embedder.encode(query, convert_to_tensor=True)
    hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)
    hits = hits[0]  # Hits for the first (and only) query

    ##### Re-Ranking #####
    # Score every retrieved passage against the query with the cross-encoder
    cross_inp = [[query, corpus[hit['corpus_id']]] for hit in hits]
    cross_scores = cross_encoder.predict(cross_inp)

    # Attach the cross-encoder scores and sort the hits by them
    for idx in range(len(cross_scores)):
        hits[idx]['cross-score'] = cross_scores[idx]
    hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)

    # Return the Qur'an verses corresponding to the top 5 re-ranked tafsir passages
    for hit in hits[0:5]:
        ans.append(quran[hit['corpus_id']])
    return "\n\n".join(ans)
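# A quick manual check of the search pipeline (before launching the UI) could look like the
# commented call below; the example query is hypothetical, not part of the original app:
# print(search("الصبر"))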
# Gradio interface (UI labels are in Arabic; English translations given in the comments)
exp = [""]
desc = "هذا البحث يعتمد على تفسير السعدي في البحث."  # "This search is based on Tafsir al-Sa'di."
inp = gr.inputs.Textbox(lines=1, placeholder=None, default="", label="أدخل كلمات البحث هنا")  # "Enter the search words here"
out = gr.outputs.Textbox(type="auto", label="نتائج البحث")  # "Search results"
iface = gr.Interface(fn=search, inputs=inp, outputs=out, examples=exp, article=desc,
                     title="البحث في معاني تفسير السعدي")  # "Search the meanings of Tafsir al-Sa'di"
iface.launch(share=True)