"""Daily-papers Space: keeps a ColBERT abstract index fresh and builds the paper table."""

import datetime
import operator

import datasets
import pandas as pd
import tqdm.auto
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi
from ragatouille import RAGPretrainedModel

api = HfApi()

INDEX_REPO_ID = "hysts-bot-data/daily-papers-abstract-index"
INDEX_DIR_PATH = ".ragatouille/colbert/indexes/daily-papers-abstract-index/"

# Download the prebuilt index snapshot and load the retriever at startup.
api.snapshot_download(
    repo_id=INDEX_REPO_ID,
    repo_type="dataset",
    local_dir=INDEX_DIR_PATH,
)
abstract_retriever = RAGPretrainedModel.from_index(INDEX_DIR_PATH)
# Run once to initialize the retriever (first search pays one-time setup cost).
abstract_retriever.search("LLM")


def update_abstract_index() -> None:
    """Re-download the abstract index snapshot and swap in a fresh retriever.

    Rebinds the module-level ``abstract_retriever`` so subsequent searches use
    the newly downloaded index, then issues a warm-up query so the first real
    search does not pay the initialization cost.
    """
    global abstract_retriever

    api.snapshot_download(
        repo_id=INDEX_REPO_ID,
        repo_type="dataset",
        local_dir=INDEX_DIR_PATH,
    )
    abstract_retriever = RAGPretrainedModel.from_index(INDEX_DIR_PATH)
    abstract_retriever.search("LLM")


# Refresh the index at the top of every hour (UTC); allow a missed run to
# still fire up to 3 minutes late before being skipped.
scheduler = BackgroundScheduler()
scheduler.add_job(
    func=update_abstract_index,
    trigger="cron",
    hour="*",
    timezone="UTC",
    misfire_grace_time=3 * 60,
)
scheduler.start()


def get_df() -> pd.DataFrame:
    """Build the papers table: merged paper metadata + stats, newest first.

    Returns a DataFrame with the ``abstract`` column removed, ``date``
    formatted as ``YYYY-MM-DD`` strings, and a ``paper_page`` URL column
    appended.
    """
    df = pd.merge(
        left=datasets.load_dataset("hysts-bot-data/daily-papers", split="train").to_pandas(),
        right=datasets.load_dataset("hysts-bot-data/daily-papers-stats", split="train").to_pandas(),
        on="arxiv_id",
    )
    df = df[::-1].reset_index(drop=True)
    df["date"] = df["date"].dt.strftime("%Y-%m-%d")
    # Vectorized replacement for the former per-row iterrows() loop, which
    # copied every row as a Series, deleted "abstract", and appended the URL.
    # Same resulting columns (abstract dropped, paper_page last), O(n) in C.
    df = df.drop(columns=["abstract"])
    df["paper_page"] = "https://huggingface.co/papers/" + df["arxiv_id"]
    return df


class Prettifier:
    """Static helpers that turn raw paper fields into small HTML fragments."""

    @staticmethod
    def get_github_link(link: str) -> str:
        """Return an HTML anchor labeled "github" for *link*, or "" when unset."""
        if not link:
            return ""
        return Prettifier.create_link("github", link)

    @staticmethod
    def create_link(text: str, url: str) -> str:
        """Return an anchor tag opening *url* in a new tab with *text* as label."""
        # NOTE(review): the anchor markup appears to have been stripped from the
        # extracted source (the original visible body never used ``url``);
        # reconstructed from the stripped-tag pattern — confirm against upstream.
        return f'<a href="{url}" target="_blank">{text}</a>'

    @staticmethod
    def to_div(text: str | None, category_name: str) -> str:
        """Wrap *text* in a div whose CSS class is ``{category_name}-{text.lower()}``."""
        if text is None:
            text = ""
        class_name = f"{category_name}-{text.lower()}"
        # NOTE(review): div markup reconstructed (the extracted source broke off
        # at ``return f'`` with the tag stripped) — confirm against upstream.
        return f'<div class="{class_name}">{text}</div>'