import json
import logging
import re
import sqlite3
from collections import defaultdict
from urllib.parse import quote_plus

import gradio as gr
import requests  # needed for the network-error types caught during translation
from deep_translator import GoogleTranslator, exceptions
from tqdm import tqdm  # Import tqdm for progress bars

from gematria import calculate_gematria
from util import process_json_files

# Set up logging
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(filename)s - %(lineno)d - %(message)s')

# Global variables for database connection, translator, and book names
conn = None
translator = None
book_names = {}

# Cache of search results, keyed by (gematria_sum, max_words) so a changed
# word-count limit never returns stale results.
gematria_cache = {}

# In-memory cache of Hebrew phrase -> English translation
translation_cache = {}


def initialize_database():
    """Initializes the SQLite database and creates all required tables.

    Uses isolation_level=None (autocommit) so bulk insertion does not pay
    per-statement transaction overhead.
    """
    global conn
    conn = sqlite3.connect('gematria.db', isolation_level=None)
    cursor = conn.cursor()
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS results (
            gematria_sum INTEGER,
            words TEXT,
            translation TEXT,
            book TEXT,
            chapter INTEGER,
            verse INTEGER,
            PRIMARY KEY (gematria_sum, words, book, chapter, verse)
        )
    ''')
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS processed_books (
            book TEXT PRIMARY KEY,
            max_phrase_length INTEGER
        )
    ''')
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS translations (
            hebrew_phrase TEXT PRIMARY KEY,
            english_translation TEXT
        )
    ''')


def initialize_translator():
    """Initializes the Google Translator (Hebrew 'iw' -> English 'en')."""
    global translator
    translator = GoogleTranslator(source='iw', target='en')
    logging.info("Translator initialized.")


def populate_database(start_book, end_book, max_phrase_length=1):
    """Generates Gematria phrases for every verse of the given book range.

    Yields:
        (gematria_sum, phrase, book_title, chapter, verse) tuples for every
        word n-gram of length 1..max_phrase_length in every verse.

    Side effects:
        Records each fully-iterated book in `processed_books` with the
        current max_phrase_length, and fills the global `book_names` map.
        Books already processed at >= max_phrase_length are skipped.
    """
    global conn, book_names
    logging.info(f"Populating database with books from {start_book} to {end_book}...")
    cursor = conn.cursor()

    for book_id in tqdm(range(start_book, end_book + 1), desc="Processing Books"):
        # process_json_files returns {book_id: book_data}; unwrap the single book.
        raw = process_json_files(book_id, book_id)
        book_data = raw.get(book_id, raw)

        if 'title' not in book_data or not isinstance(book_data['title'], str):
            logging.warning(f"Skipping book {book_id} due to missing or invalid 'title' field.")
            continue
        title = book_data['title']
        book_names[book_id] = title

        # Skip books already processed with an equal or longer phrase length.
        cursor.execute('SELECT max_phrase_length FROM processed_books WHERE book = ?', (title,))
        row = cursor.fetchone()
        if row and row[0] >= max_phrase_length:
            logging.info(f"Skipping book {title}: Already processed with max_phrase_length {row[0]}")
            continue

        logging.info(f"Processing book {title} with max_phrase_length {max_phrase_length}")
        if 'text' not in book_data or not isinstance(book_data['text'], list):
            logging.warning(f"Skipping book {book_id} due to missing or invalid 'text' field.")
            continue

        for chapter_id, chapter in enumerate(book_data['text']):
            for verse_id, verse in enumerate(chapter):
                verse_text = flatten_text(verse)
                # Strip bracketed editorial text, then everything that is not
                # a Hebrew letter or a space, then collapse runs of spaces.
                verse_text = re.sub(r'\[.*?\]', '', verse_text)
                verse_text = re.sub(r"[^\u05D0-\u05EA ]+", "", verse_text)
                verse_text = re.sub(r" +", " ", verse_text)
                words = verse_text.split()

                # Yield one tuple per n-gram; generator keeps memory flat.
                for length in range(1, max_phrase_length + 1):
                    for start in range(len(words) - length + 1):
                        phrase = " ".join(words[start:start + length])
                        gematria_sum = calculate_gematria(phrase.replace(" ", ""))
                        yield gematria_sum, phrase, title, chapter_id + 1, verse_id + 1

        # Mark the book as processed with the current max_phrase_length.
        cursor.execute('''
            INSERT OR REPLACE INTO processed_books (book, max_phrase_length)
            VALUES (?, ?)
        ''', (title, max_phrase_length))


def insert_phrases_to_db(phrases):
    """Bulk-inserts (gematria_sum, words, book, chapter, verse) rows.

    Duplicate rows (same primary key) are silently ignored.
    """
    global conn
    cursor = conn.cursor()
    cursor.executemany('''
        INSERT OR IGNORE INTO results (gematria_sum, words, book, chapter, verse)
        VALUES (?, ?, ?, ?, ?)
    ''', phrases)
    conn.commit()


def get_translation(phrase):
    """Returns the English translation of a Hebrew phrase.

    Lookup order: in-memory cache -> `translations` table -> live Google
    Translate (which is then persisted to both caches).
    """
    global translator, conn, translation_cache
    if phrase in translation_cache:
        return translation_cache[phrase]

    cursor = conn.cursor()
    cursor.execute('''
        SELECT english_translation FROM translations
        WHERE hebrew_phrase = ?
    ''', (phrase,))
    row = cursor.fetchone()
    if row and row[0]:
        translation = row[0]
    else:
        translation = translate_and_store(phrase)
        cursor.execute('''
            INSERT OR IGNORE INTO translations (hebrew_phrase, english_translation)
            VALUES (?, ?)
        ''', (phrase, translation))
    # Populate the in-memory cache on every path so repeated phrases in one
    # result set never re-query the database.
    translation_cache[phrase] = translation
    return translation


def translate_and_store(phrase):
    """Translates a Hebrew phrase via Google Translate with retry.

    Retries up to 3 times on translator/network errors; returns the sentinel
    string "[Translation Error]" when all retries fail.
    """
    global translator
    max_retries = 3
    for attempt in range(1, max_retries + 1):
        try:
            translation = translator.translate(phrase)
            logging.debug(f"Translated phrase: {translation}")
            return translation
        except (exceptions.TranslationNotFound, exceptions.NotValidPayload,
                exceptions.ServerException, exceptions.RequestError,
                requests.exceptions.ConnectionError) as e:
            logging.warning(f"Error translating phrase '{phrase}': {e}. Retrying... ({attempt}/{max_retries})")
    logging.error(f"Failed to translate phrase '{phrase}' after {max_retries} retries.")
    return "[Translation Error]"


def search_gematria_in_db(gematria_sum, max_words):
    """Searches the database for phrases with the given Gematria value.

    Returns:
        List of (words, book, chapter, verse) tuples whose word count
        is <= max_words.
    """
    global conn
    cursor = conn.cursor()
    cursor.execute('''
        SELECT words, book, chapter, verse FROM results
        WHERE gematria_sum = ?
    ''', (gematria_sum,))
    rows = cursor.fetchall()

    filtered_results = []
    for words, book, chapter, verse in rows:
        word_count = words.count(' ') + 1  # spaces + 1 == number of words
        # BUGFIX: original compared word_count to itself, so the max_words
        # filter never excluded anything.
        if word_count <= max_words:
            filtered_results.append((words, book, chapter, verse))
    logging.debug(f"Found {len(filtered_results)} matching phrases for Gematria: {gematria_sum} after filtering.")
    return filtered_results


def gematria_search_interface(phrase, max_words, show_translation):
    """Main Gradio handler: finds Tanach phrases matching the input's Gematria.

    Returns an HTML string grouping matches by book, optionally with English
    translations and Bible Gateway links.
    """
    if not phrase.strip():
        return "Please enter a phrase."

    global conn, book_names, gematria_cache
    # Autocommit connection so translations stored via get_translation()
    # survive conn.close() (a plain connection would roll them back).
    conn = sqlite3.connect('gematria.db', isolation_level=None)

    phrase_gematria = calculate_gematria(phrase.replace(" ", ""))
    logging.info(f"Searching for phrases with Gematria: {phrase_gematria}")
    logging.debug(f"Phrase Gematria: {phrase_gematria}")
    logging.debug(f"Max Words: {max_words}")

    # Cache key includes max_words: the same Gematria value with a different
    # word-count limit yields a different result set.
    cache_key = (phrase_gematria, max_words)
    if cache_key in gematria_cache:
        matching_phrases = gematria_cache[cache_key]
        logging.debug("Retrieved matching phrases from cache.")
    else:
        matching_phrases = search_gematria_in_db(phrase_gematria, max_words)
        gematria_cache[cache_key] = matching_phrases
        logging.debug("Retrieved matching phrases from database.")

    if not matching_phrases:
        conn.close()
        return "No matching phrases found."

    # Sort by canonical book order, then chapter, then verse. Invert the
    # book_names map once instead of scanning two lists per comparison.
    book_id_by_title = {title: book_id for book_id, title in book_names.items()}
    sorted_phrases = sorted(
        matching_phrases,
        key=lambda x: (int(book_id_by_title[x[1]]), x[2], x[3]))
    logging.debug(f"Sorted matching phrases: {sorted_phrases}")

    # Group results by book, preserving the sorted order.
    results_by_book = defaultdict(list)
    for words, book, chapter, verse in sorted_phrases:
        results_by_book[book].append((words, chapter, verse))
    logging.debug(f"Grouped results by book: {results_by_book}")

    # Format results for display.
    # NOTE(review): the original HTML markup was garbled in the source; this
    # is a reconstructed minimal equivalent — confirm against the intended UI.
    results = ["<div class='results-container'>"]
    for book, phrases in results_by_book.items():
        results.append(f"<h4>Book: {book}</h4>")  # Directly display book name
        for words, chapter, verse in phrases:
            translation = get_translation(words) if show_translation else ""
            link = (f"https://www.biblegateway.com/passage/?search="
                    f"{quote_plus(book)}+{chapter}%3A{verse}&version=CJB")
            results.append(f"""<div class='result-item'>
<p>Chapter: {chapter}, Verse: {verse}</p>
<p>Hebrew Phrase: {words}</p>
<p>Translation: {translation}</p>
<a href="{link}" target="_blank">[See on Bible Gateway]</a>
</div>""")
    results.append("</div>")  # Close results-container div

    conn.close()

    # Add CSS styling (currently empty placeholder, kept for future use).
    style = """ """
    return style + "\n".join(results)


def flatten_text(text):
    """Recursively flattens nested lists of strings into one space-joined string."""
    if isinstance(text, list):
        return " ".join(
            flatten_text(item) if isinstance(item, list) else item
            for item in text)
    return text


def run_app():
    """Initializes the database and translator, populates phrase data for
    books 1-39 at phrase lengths 1..5, then launches the Gradio app."""
    initialize_database()
    initialize_translator()

    logging.info("Starting database population...")
    batch = []  # Collect phrases and insert in bulk for efficiency
    for max_phrase_length in range(1, 6):  # Phrases of up to 5 words
        for row in tqdm(populate_database(1, 39, max_phrase_length=max_phrase_length),
                        desc=f"Populating Database (Max Length: {max_phrase_length})"):
            batch.append(row)
            if len(batch) >= 1000:  # Insert in batches of 1000
                insert_phrases_to_db(batch)
                batch = []
    if batch:  # Insert remaining phrases
        insert_phrases_to_db(batch)
    logging.info("Database population complete.")

    iface = gr.Interface(
        fn=gematria_search_interface,
        inputs=[
            gr.Textbox(label="Enter phrase"),
            gr.Number(label="Max Word Count in Results", value=1, minimum=1, maximum=10),
            gr.Checkbox(label="Show Translation", value=True),
        ],
        outputs=gr.HTML(label="Results"),
        title="Gematria Search in Tanach",
        description="Search for phrases in the Tanach that have the same Gematria value.",
        live=False,
        allow_flagging="never",
    )
    iface.launch()


if __name__ == "__main__":
    run_app()