# File: dataset-viewer-main/front/admin_ui/app.py import json import os import urllib.parse from itertools import product import duckdb import gradio as gr import huggingface_hub as hfh import matplotlib import matplotlib.pyplot as plt import networkx as nx import pandas as pd import requests from libcommon.processing_graph import processing_graph from tqdm.contrib.concurrent import thread_map matplotlib.use('SVG') DEV = os.environ.get('DEV', False) HF_ENDPOINT = os.environ.get('HF_ENDPOINT', 'https://huggingface.co') PROD_DV_ENDPOINT = os.environ.get('PROD_DV_ENDPOINT', 'https://datasets-server.huggingface.co') DEV_DV_ENDPOINT = os.environ.get('DEV_DV_ENDPOINT', 'http://localhost:8100') ADMIN_HF_ORGANIZATION = os.environ.get('ADMIN_HF_ORGANIZATION', 'huggingface') HF_TOKEN = os.environ.get('HF_TOKEN') DV_ENDPOINT = DEV_DV_ENDPOINT if DEV else PROD_DV_ENDPOINT pending_jobs_df = None def healthcheck(): try: response = requests.head(f'{DV_ENDPOINT}/admin/healthcheck', timeout=10) except requests.ConnectionError as error: return f'❌ Failed to connect to {DV_ENDPOINT} (error {error})' if response.status_code == 200: return f'*Connected to {DV_ENDPOINT}*' else: return f'❌ Failed to connect to {DV_ENDPOINT} (error {response.status_code})' def draw_graph(width, height): graph = processing_graph._nx_graph pos = nx.nx_agraph.graphviz_layout(graph, prog='dot') fig = plt.figure(figsize=(width, height)) nx.draw_networkx(graph, pos=pos, node_color='#d1b2f8', node_size=500) return fig with gr.Blocks() as demo: gr.Markdown('## Datasets-server admin page') gr.Markdown(healthcheck) with gr.Row(visible=HF_TOKEN is None) as auth_page: with gr.Column(): auth_title = gr.Markdown('Enter your token ([settings](https://huggingface.co/settings/tokens)):') token_box = gr.Textbox(HF_TOKEN or '', label='token', placeholder='hf_xxx', type='password') auth_error = gr.Markdown('', visible=False) with gr.Row(visible=HF_TOKEN is not None) as main_page: with gr.Column(): welcome_title = gr.Markdown('### Welcome') with gr.Tab('Home dashboard'): home_dashboard_fetch_button = gr.Button('Fetch') gr.Markdown('### Dataset infos') home_dashboard_trending_datasets_infos_by_builder_name_table = gr.DataFrame(pd.DataFrame({'Builder name': [], 'Count': [], '% of all datasets with infos': [], '% of all public datasets': []})) gr.Markdown('### Trending datasets coverage (is-valid)') home_dashboard_trending_datasets_coverage_stats_table = gr.DataFrame(pd.DataFrame({'Num trending datasets': [], 'HTTP Status': [], 'Preview': [], 'Viewer': [], 'Search': [], 'Filter': [], 'Statistics': []})) home_dashboard_trending_datasets_coverage_table = gr.DataFrame(pd.DataFrame({'All trending datasets': [], 'HTTP Status': [], 'Preview': [], 'Viewer': [], 'Search': [], 'Filter': [], 'Statistics': []})) def fetch_home_dashboard(token): out = {home_dashboard_trending_datasets_infos_by_builder_name_table: gr.DataFrame(value=None), home_dashboard_trending_datasets_coverage_stats_table: gr.DataFrame(value=None), home_dashboard_trending_datasets_coverage_table: gr.DataFrame(value=None)} headers = {'Authorization': f'Bearer {token}'} response = requests.get(f'{DV_ENDPOINT}/admin/num-dataset-infos-by-builder-name', headers=headers, timeout=60) if response.status_code == 200: num_infos_by_builder_name = response.json() total_num_infos = sum(num_infos_by_builder_name.values()) num_public_datasets = sum((1 for _ in hfh.HfApi(endpoint=HF_ENDPOINT).list_datasets())) out[home_dashboard_trending_datasets_infos_by_builder_name_table] = gr.DataFrame(visible=True, 
value=pd.DataFrame({'Builder name': list(num_infos_by_builder_name.keys()), 'Count': list(num_infos_by_builder_name.values()), '% of all datasets with infos': [f'{round(100 * num_infos / total_num_infos, 2)}%' for num_infos in num_infos_by_builder_name.values()], '% of all public datasets': [f'{round(100 * num_infos / num_public_datasets, 2)}%' for num_infos in num_infos_by_builder_name.values()]})) else: out[home_dashboard_trending_datasets_infos_by_builder_name_table] = gr.DataFrame(visible=True, value=pd.DataFrame({'Error': [f'❌ Failed to fetch dataset infos from {DV_ENDPOINT} (error {response.status_code})']})) response = requests.get(f'{HF_ENDPOINT}/api/trending?type=dataset&limit=20', timeout=60) if response.status_code == 200: trending_datasets = [repo_info['repoData']['id'] for repo_info in response.json()['recentlyTrending']] def get_is_valid_response(dataset: str): return requests.get(f'{DV_ENDPOINT}/is-valid?dataset={dataset}', headers=headers, timeout=60) is_valid_responses = thread_map(get_is_valid_response, trending_datasets, desc='get_is_valid_response') trending_datasets_coverage = {'All trending datasets': []} error_datasets = [] unauthorized_datasets = [] for (dataset, is_valid_response) in zip(trending_datasets, is_valid_responses): if is_valid_response.status_code == 200: response_json = is_valid_response.json() trending_datasets_coverage['All trending datasets'].append(dataset) for is_valid_field in response_json: pretty_field = is_valid_field.replace('_', ' ').capitalize() if pretty_field not in trending_datasets_coverage: trending_datasets_coverage[pretty_field] = [] trending_datasets_coverage[pretty_field].append('✅' if response_json[is_valid_field] is True else '❌') elif is_valid_response.status_code == 500: error_datasets.append(dataset) else: unauthorized_datasets.append(dataset) def fill_empty_cells(datasets, sign): trending_datasets_coverage['All trending datasets'] += datasets for pretty_field in trending_datasets_coverage: trending_datasets_coverage[pretty_field] += [sign] * (len(trending_datasets_coverage['All trending datasets']) - len(trending_datasets_coverage[pretty_field])) fill_empty_cells(error_datasets, '❌') fill_empty_cells(unauthorized_datasets, '🚫') out[home_dashboard_trending_datasets_coverage_table] = gr.DataFrame(visible=True, value=pd.DataFrame(trending_datasets_coverage)) trending_datasets_coverage_stats = {'Num trending datasets': [len(trending_datasets)], **{is_valid_field: [f"{round(100 * sum((1 for coverage in trending_datasets_coverage[is_valid_field] if coverage == '✅')) / len(trending_datasets), 2)}%"] for is_valid_field in trending_datasets_coverage if is_valid_field != 'All trending datasets'}} out[home_dashboard_trending_datasets_coverage_stats_table] = gr.DataFrame(visible=True, value=pd.DataFrame(trending_datasets_coverage_stats)) else: out[home_dashboard_trending_datasets_coverage_table] = gr.DataFrame(visible=True, value=pd.DataFrame({'Error': [f'❌ Failed to fetch trending datasets from {HF_ENDPOINT} (error {response.status_code})']})) return out home_dashboard_fetch_button.click(fetch_home_dashboard, inputs=[token_box], outputs=[home_dashboard_trending_datasets_infos_by_builder_name_table, home_dashboard_trending_datasets_coverage_stats_table, home_dashboard_trending_datasets_coverage_table]) with gr.Tab('View pending jobs'): fetch_pending_jobs_button = gr.Button('Fetch pending jobs') gr.Markdown('### Pending jobs summary') pending_jobs_summary_table = gr.DataFrame(pd.DataFrame({'Jobs': [], 'Waiting': [], 'Started': []})) 
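# Note: the "Query the pending jobs table" textbox defined below is executed by query_jobs()
# via duckdb.query(), which resolves the name pending_jobs_df through DuckDB's replacement
# scans (an in-scope pandas DataFrame can be referenced directly by its Python variable name
# in SQL). A minimal, self-contained illustration of the same mechanism, with made-up
# DataFrame contents:
#
#     import duckdb
#     import pandas as pd
#
#     pending_jobs_df = pd.DataFrame({"dataset": ["allenai/c4"], "status": ["waiting"]})
#     result = duckdb.query("SELECT * FROM pending_jobs_df WHERE status = 'waiting'").to_df()
#     print(result)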
gr.Markdown('### Most recent') recent_pending_jobs_table = gr.DataFrame() gr.Markdown('### Query the pending jobs table') pending_jobs_query = gr.Textbox(label='Query pending_jobs_df', placeholder="SELECT * FROM pending_jobs_df WHERE dataset LIKE 'allenai/c4", value="SELECT * FROM pending_jobs_df WHERE dataset LIKE 'allenai/c4'", lines=3) query_pending_jobs_button = gr.Button('Run') pending_jobs_query_result_df = gr.DataFrame() def view_jobs(token): global pending_jobs_df headers = {'Authorization': f'Bearer {token}'} response = requests.get(f'{DV_ENDPOINT}/admin/pending-jobs', headers=headers, timeout=60) if response.status_code == 200: pending_jobs = response.json() df = pd.DataFrame([job for job_type in pending_jobs for job_state in pending_jobs[job_type] for job in pending_jobs[job_type][job_state]]) if 'started_at' in df.columns: df['started_at'] = pd.to_datetime(df['started_at'], errors='coerce') if 'last_heartbeat' in df.columns: df['last_heartbeat'] = pd.to_datetime(df['last_heartbeat'], errors='coerce') if 'created_at' in df.columns: df['created_at'] = pd.to_datetime(df['created_at'], errors='coerce') most_recent = df.nlargest(5, 'created_at') else: most_recent = pd.DataFrame() pending_jobs_df = df return {pending_jobs_summary_table: gr.DataFrame(visible=True, value=pd.DataFrame({'Jobs': list(pending_jobs), 'Waiting': [len(pending_jobs[job_type]['waiting']) for job_type in pending_jobs], 'Started': [len(pending_jobs[job_type]['started']) for job_type in pending_jobs]})), recent_pending_jobs_table: gr.DataFrame(value=most_recent)} else: return {pending_jobs_summary_table: gr.DataFrame(visible=True, value=pd.DataFrame({'Error': [f'❌ Failed to view pending jobs to {DV_ENDPOINT} (error {response.status_code})']})), recent_pending_jobs_table: gr.DataFrame(value=None)} def query_jobs(pending_jobs_query): global pending_jobs_df if pending_jobs_df is None: return {pending_jobs_query_result_df: gr.DataFrame(value=pd.DataFrame({'Error': ['❌ Please, fetch the pending jobs first']}))} try: result = duckdb.query(pending_jobs_query).to_df() except (duckdb.ParserException, duckdb.CatalogException, duckdb.BinderException) as error: return {pending_jobs_query_result_df: gr.DataFrame(value=pd.DataFrame({'Error': [f'❌ {str(error)}']}))} return {pending_jobs_query_result_df: gr.DataFrame(value=result)} fetch_pending_jobs_button.click(view_jobs, inputs=token_box, outputs=[recent_pending_jobs_table, pending_jobs_summary_table]) query_pending_jobs_button.click(query_jobs, inputs=pending_jobs_query, outputs=[pending_jobs_query_result_df]) with gr.Tab('Refresh dataset step'): job_types = [processing_step.job_type for processing_step in processing_graph.get_alphabetically_ordered_processing_steps()] def on_change_refresh_job_type(job_type): return processing_graph.get_processing_step(job_type).difficulty refresh_type = gr.Dropdown(job_types, multiselect=False, type='value', label='job type', value=job_types[0]) refresh_dataset_name = gr.Textbox(label='dataset', placeholder='allenai/c4') refresh_config_name = gr.Textbox(label='config (optional)', placeholder='en') refresh_split_name = gr.Textbox(label='split (optional)', placeholder='train, test') gr.Markdown("*you can select multiple values by separating them with commas, e.g. 
split='train, test'*") refresh_difficulty = gr.Slider(0, 100, processing_graph.get_processing_step(job_types[0]).difficulty, step=10, interactive=True, label='difficulty') refresh_type.change(on_change_refresh_job_type, refresh_type, refresh_difficulty) refresh_priority = gr.Dropdown(['low', 'normal', 'high'], multiselect=False, label='priority', value='high') refresh_dataset_button = gr.Button('Force refresh dataset') refresh_dataset_output = gr.Markdown('') def refresh_dataset(token, refresh_type, refresh_dataset_names, refresh_config_names, refresh_split_names, refresh_priority, refresh_difficulty): headers = {'Authorization': f'Bearer {token}'} all_results = '' for (refresh_dataset_name, refresh_config_name, refresh_split_name) in product(refresh_dataset_names.split(','), refresh_config_names.split(','), refresh_split_names.split(',')): refresh_dataset_name = refresh_dataset_name.strip() params = {'dataset': refresh_dataset_name, 'priority': refresh_priority} if refresh_config_name: refresh_config_name = refresh_config_name.strip() params['config'] = refresh_config_name if refresh_split_name: refresh_split_name = refresh_split_name.strip() params['split'] = refresh_split_name if refresh_difficulty: params['difficulty'] = refresh_difficulty params = urllib.parse.urlencode(params) response = requests.post(f'{DV_ENDPOINT}/admin/force-refresh/{refresh_type}?{params}', headers=headers, timeout=60) if response.status_code == 200: result = f"[{refresh_dataset_name}] ✅ Added processing step to the queue: '{refresh_type}'" if refresh_config_name: result += f", for config '{refresh_config_name}'" if refresh_split_name: result += f", for split '{refresh_split_name}'" else: result = f'[{refresh_dataset_name}] ❌ Failed to add processing step to the queue. Error {response.status_code}' try: if response.json().get('error'): result += f": {response.json()['error']}" except requests.JSONDecodeError: result += f': {response.content}' all_results += result.strip('\n') + '\n' return '```\n' + all_results + '\n```' refresh_dataset_button.click(refresh_dataset, inputs=[token_box, refresh_type, refresh_dataset_name, refresh_config_name, refresh_split_name, refresh_priority, refresh_difficulty], outputs=refresh_dataset_output) with gr.Tab('Recreate dataset'): delete_and_recreate_dataset_name = gr.Textbox(label='dataset', placeholder='stanfordnlp/imdb') delete_and_recreate_priority = gr.Dropdown(['low', 'normal', 'high'], multiselect=False, label='priority', value='high') gr.Markdown('Beware: this will delete all the jobs, cache entries and assets for the dataset (for all the revisions). The dataset viewer will be unavailable until the cache is rebuilt.') delete_and_recreate_dataset_button = gr.Button('Delete and recreate') delete_and_recreate_dataset_output = gr.Markdown('') def delete_and_recreate_dataset(token, delete_and_recreate_dataset_name, delete_and_recreate_priority): headers = {'Authorization': f'Bearer {token}'} delete_and_recreate_dataset_name = delete_and_recreate_dataset_name.strip() params = {'dataset': delete_and_recreate_dataset_name, 'priority': delete_and_recreate_priority} params = urllib.parse.urlencode(params) response = requests.post(f'{DV_ENDPOINT}/admin/recreate-dataset?{params}', headers=headers, timeout=60) if response.status_code == 200: result = f'[{delete_and_recreate_dataset_name}] ✅ All the assets have been deleted. A new job has been created to generate the cache again.' else: result = f'[{delete_and_recreate_dataset_name}] ❌ Failed to delete and recreate the dataset. 
Error {response.status_code}' try: if response.json().get('error'): result += f": {response.json()['error']}" except requests.JSONDecodeError: result += f': {response.content}' return result.strip('\n') + '\n' delete_and_recreate_dataset_button.click(delete_and_recreate_dataset, inputs=[token_box, delete_and_recreate_dataset_name, delete_and_recreate_priority], outputs=delete_and_recreate_dataset_output) with gr.Tab('Dataset status'): dataset_name = gr.Textbox(label='dataset', placeholder='allenai/c4') dataset_status_button = gr.Button('Get dataset status') gr.Markdown('### Pending jobs') jobs_table = gr.DataFrame() gr.Markdown('### Cached responses') cached_responses_table = gr.DataFrame() def get_dataset_status(token, dataset): headers = {'Authorization': f'Bearer {token}'} response = requests.get(f'{DV_ENDPOINT}/admin/dataset-status?dataset={dataset}', headers=headers, timeout=60) if response.status_code == 200: dataset_status = response.json() cached_responses_df = pd.DataFrame([{'kind': cached_response['kind'], 'dataset': cached_response['dataset'], 'config': cached_response['config'], 'split': cached_response['split'], 'http_status': cached_response['http_status'], 'error_code': cached_response['error_code'], 'job_runner_version': cached_response['job_runner_version'], 'dataset_git_revision': cached_response['dataset_git_revision'], 'progress': cached_response['progress'], 'updated_at': cached_response['updated_at'], 'failed_runs': cached_response['failed_runs'], 'details': json.dumps(cached_response['details'])} for content in dataset_status.values() for cached_response in content['cached_responses']]) jobs_df = pd.DataFrame([{'type': job['type'], 'dataset': job['dataset'], 'revision': job['revision'], 'config': job['config'], 'split': job['split'], 'namespace': job['namespace'], 'priority': job['priority'], 'status': job['status'], 'difficulty': job['difficulty'], 'created_at': job['created_at'], 'started_at': job['started_at'], 'last_heartbeat': job['last_heartbeat']} for content in dataset_status.values() for job in content['jobs']]) return {cached_responses_table: gr.DataFrame(value=cached_responses_df), jobs_table: gr.DataFrame(value=jobs_df)} else: return {cached_responses_table: gr.DataFrame(value=pd.DataFrame([{'error': f'❌ Failed to get status for {dataset} (error {response.status_code})'}])), jobs_table: gr.DataFrame(value=pd.DataFrame([{'content': str(response.content)}]))} dataset_status_button.click(get_dataset_status, inputs=[token_box, dataset_name], outputs=[cached_responses_table, jobs_table]) with gr.Tab('Processing graph'): gr.Markdown("## 💫 Please, don't forget to rebuild (factory reboot) this space immediately after each deploy 💫") gr.Markdown('### so that we get the 🚀 production 🚀 version of the graph here ') with gr.Row(): width = gr.Slider(1, 30, 19, step=1, label='Width') height = gr.Slider(1, 30, 15, step=1, label='Height') output = gr.Plot() draw_button = gr.Button('Plot processing graph') draw_button.click(draw_graph, inputs=[width, height], outputs=output) def auth(token): if not token: return {auth_error: gr.Markdown(value='', visible=False)} try: user = hfh.whoami(token=token) except requests.HTTPError as err: return {auth_error: gr.Markdown(value=f'❌ Error ({err})', visible=True)} orgs = [org['name'] for org in user['orgs']] if ADMIN_HF_ORGANIZATION in orgs: return {auth_page: gr.Row(visible=False), welcome_title: gr.Markdown(value=f"### Welcome {user['name']}"), main_page: gr.Row(visible=True)} else: return {auth_error: gr.Markdown(value=f"❌ 
Unauthorized (user '{user['name']} is not a member of '{ADMIN_HF_ORGANIZATION}')")} token_box.change(auth, inputs=token_box, outputs=[auth_error, welcome_title, auth_page, main_page]) if __name__ == '__main__': demo.launch() # File: dataset-viewer-main/jobs/cache_maintenance/src/cache_maintenance/backfill.py import concurrent.futures import logging from collections.abc import Iterator from dataclasses import dataclass, field from typing import Optional from libcommon.dtos import Priority from libcommon.operations import OperationsStatistics, backfill_dataset from libcommon.simple_cache import get_all_datasets, get_datasets_with_retryable_errors from libcommon.storage_client import StorageClient MAX_BACKFILL_WORKERS = 8 LOG_BATCH = 100 @dataclass class BackfillStatistics: num_total_datasets: int = 0 num_analyzed_datasets: int = 0 num_error_datasets: int = 0 operations: OperationsStatistics = field(default_factory=OperationsStatistics) def add(self, other: 'BackfillStatistics') -> None: self.num_total_datasets += other.num_total_datasets self.num_analyzed_datasets += other.num_analyzed_datasets self.num_error_datasets += other.num_error_datasets self.operations.add(other.operations) def get_log(self) -> str: return f'{self.num_analyzed_datasets} analyzed datasets (total: {self.num_total_datasets} datasets): {self.operations.num_untouched_datasets} already ok ({100 * self.operations.num_untouched_datasets / self.num_analyzed_datasets:.2f}%), {self.operations.num_backfilled_datasets} backfilled ({100 * self.operations.num_backfilled_datasets / self.num_analyzed_datasets:.2f}%), {self.operations.num_deleted_datasets} deleted ({100 * self.operations.num_deleted_datasets / self.num_analyzed_datasets:.2f}%), {self.num_error_datasets} raised an exception ({100 * self.num_error_datasets / self.num_analyzed_datasets:.2f}%). 
{self.operations.tasks.get_log()}' def backfill_all_datasets(hf_endpoint: str, blocked_datasets: list[str], hf_token: Optional[str]=None, storage_clients: Optional[list[StorageClient]]=None) -> None: logging.info('backfill datasets in the database and delete non-supported ones') datasets_in_database = get_all_datasets() backfill_datasets(dataset_names=datasets_in_database, hf_endpoint=hf_endpoint, blocked_datasets=blocked_datasets, hf_token=hf_token, storage_clients=storage_clients) def backfill_retryable_errors(hf_endpoint: str, blocked_datasets: list[str], hf_token: Optional[str]=None, storage_clients: Optional[list[StorageClient]]=None) -> None: logging.info('backfill datasets that have a retryable error') dataset_names = get_datasets_with_retryable_errors() backfill_datasets(dataset_names=dataset_names, hf_endpoint=hf_endpoint, blocked_datasets=blocked_datasets, hf_token=hf_token, storage_clients=storage_clients) def try_backfill_dataset(dataset: str, hf_endpoint: str, blocked_datasets: list[str], hf_token: Optional[str]=None, storage_clients: Optional[list[StorageClient]]=None) -> BackfillStatistics: try: return BackfillStatistics(num_analyzed_datasets=1, operations=backfill_dataset(dataset=dataset, hf_endpoint=hf_endpoint, blocked_datasets=blocked_datasets, hf_token=hf_token, priority=Priority.LOW, hf_timeout_seconds=None, storage_clients=storage_clients)) except Exception as e: logging.warning(f'failed to update_dataset {dataset}: {e}') return BackfillStatistics(num_analyzed_datasets=1, num_error_datasets=1) def backfill_datasets(dataset_names: set[str], hf_endpoint: str, blocked_datasets: list[str], hf_token: Optional[str]=None, storage_clients: Optional[list[StorageClient]]=None) -> BackfillStatistics: logging.info(f'analyzing {len(dataset_names)} datasets in the database') statistics = BackfillStatistics(num_total_datasets=len(dataset_names)) def _backfill_dataset(dataset: str) -> BackfillStatistics: return try_backfill_dataset(dataset, hf_endpoint, blocked_datasets, hf_token, storage_clients) with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_BACKFILL_WORKERS) as executor: def get_futures() -> Iterator[concurrent.futures.Future[BackfillStatistics]]: for dataset in dataset_names: yield executor.submit(_backfill_dataset, dataset) for future in concurrent.futures.as_completed(get_futures()): try: dataset_statistics = future.result() except Exception as e: logging.warning(f'Unexpected error: {e}') dataset_statistics = BackfillStatistics(num_total_datasets=1, num_analyzed_datasets=1, num_error_datasets=1) finally: statistics.add(dataset_statistics) logging.debug(statistics.get_log()) if statistics.num_analyzed_datasets % LOG_BATCH == 0: logging.info(statistics.get_log()) logging.info(statistics.get_log()) logging.info('backfill completed') return statistics # File: dataset-viewer-main/jobs/cache_maintenance/src/cache_maintenance/cache_metrics.py import logging from libcommon.simple_cache import CacheTotalMetricDocument, get_responses_count_by_kind_status_and_error_code def collect_cache_metrics() -> None: logging.info('updating cache metrics') new_metric_by_id = get_responses_count_by_kind_status_and_error_code() new_ids = set(new_metric_by_id.keys()) old_ids = set(((metric.kind, metric.http_status, metric.error_code) for metric in CacheTotalMetricDocument.objects())) to_delete = old_ids - new_ids for (kind, http_status, error_code) in to_delete: CacheTotalMetricDocument.objects(kind=kind, http_status=http_status, error_code=error_code).delete() logging.info(f'kind={kind!r} 
http_status={http_status!r} error_code={error_code!r} has been deleted') for ((kind, http_status, error_code), total) in new_metric_by_id.items(): CacheTotalMetricDocument.objects(kind=kind, http_status=http_status, error_code=error_code).upsert_one(total=total) logging.info(f'kind={kind!r} http_status={http_status!r} error_code={error_code!r}: total={total!r} has been inserted') logging.info('cache metrics have been updated') # File: dataset-viewer-main/jobs/cache_maintenance/src/cache_maintenance/config.py from dataclasses import dataclass, field from typing import Optional from environs import Env from libcommon.config import AssetsConfig, CacheConfig, CachedAssetsConfig, CommonConfig, LogConfig, QueueConfig, S3Config DISCUSSIONS_BOT_ASSOCIATED_USER_NAME = None DISCUSSIONS_BOT_TOKEN = None DISCUSSIONS_PARQUET_REVISION = 'refs/convert/parquet' @dataclass(frozen=True) class DiscussionsConfig: bot_associated_user_name: Optional[str] = DISCUSSIONS_BOT_ASSOCIATED_USER_NAME bot_token: Optional[str] = DISCUSSIONS_BOT_TOKEN parquet_revision: str = DISCUSSIONS_PARQUET_REVISION @classmethod def from_env(cls) -> 'DiscussionsConfig': env = Env(expand_vars=True) with env.prefixed('DISCUSSIONS_'): return cls(bot_associated_user_name=env.str(name='BOT_ASSOCIATED_USER_NAME', default=DISCUSSIONS_BOT_ASSOCIATED_USER_NAME), bot_token=env.str(name='BOT_TOKEN', default=DISCUSSIONS_BOT_TOKEN), parquet_revision=env.str(name='PARQUET_REVISION', default=DISCUSSIONS_PARQUET_REVISION)) DIRECTORY_CLEANING_CACHE_DIRECTORY = None DIRECTORY_CLEANING_SUBFOLDER_PATTERN = '*/datasets/*' DIRECTORY_CLEANING_EXPIRED_TIME_INTERVAL_SECONDS = 3 * 60 * 60 @dataclass(frozen=True) class DirectoryCleaning: cache_directory: Optional[str] = DIRECTORY_CLEANING_CACHE_DIRECTORY subfolder_pattern: str = DIRECTORY_CLEANING_SUBFOLDER_PATTERN expired_time_interval_seconds: int = DIRECTORY_CLEANING_EXPIRED_TIME_INTERVAL_SECONDS @classmethod def from_env(cls) -> 'DirectoryCleaning': env = Env(expand_vars=True) with env.prefixed('DIRECTORY_CLEANING_'): return cls(cache_directory=env.str(name='CACHE_DIRECTORY', default=DIRECTORY_CLEANING_CACHE_DIRECTORY), subfolder_pattern=env.str(name='SUBFOLDER_PATTERN', default=DIRECTORY_CLEANING_SUBFOLDER_PATTERN), expired_time_interval_seconds=env.int(name='EXPIRED_TIME_INTERVAL_SECONDS', default=DIRECTORY_CLEANING_EXPIRED_TIME_INTERVAL_SECONDS)) CACHE_MAINTENANCE_ACTION = None @dataclass(frozen=True) class JobConfig: log: LogConfig = field(default_factory=LogConfig) cache: CacheConfig = field(default_factory=CacheConfig) queue: QueueConfig = field(default_factory=QueueConfig) common: CommonConfig = field(default_factory=CommonConfig) directory_cleaning: DirectoryCleaning = field(default_factory=DirectoryCleaning) discussions: DiscussionsConfig = field(default_factory=DiscussionsConfig) action: Optional[str] = CACHE_MAINTENANCE_ACTION s3: S3Config = field(default_factory=S3Config) assets: AssetsConfig = field(default_factory=AssetsConfig) cached_assets: CachedAssetsConfig = field(default_factory=CachedAssetsConfig) @classmethod def from_env(cls) -> 'JobConfig': env = Env(expand_vars=True) return cls(log=LogConfig.from_env(), cache=CacheConfig.from_env(), queue=QueueConfig.from_env(), common=CommonConfig.from_env(), directory_cleaning=DirectoryCleaning.from_env(), discussions=DiscussionsConfig.from_env(), action=env.str(name='CACHE_MAINTENANCE_ACTION', default=CACHE_MAINTENANCE_ACTION), s3=S3Config.from_env(), assets=AssetsConfig.from_env(), cached_assets=CachedAssetsConfig.from_env()) # File: 
dataset-viewer-main/jobs/cache_maintenance/src/cache_maintenance/discussions.py import logging from dataclasses import dataclass from typing import Literal, Optional from urllib import parse from huggingface_hub import HfApi from huggingface_hub.constants import REPO_TYPE_DATASET from libcommon.simple_cache import get_datasets_with_last_updated_kind PARQUET_CACHE_KIND = 'config-parquet' DAYS = 1 DISCUSSION_TITLE = '[bot] [No action needed] Conversion to Parquet' DISCUSSION_DESCRIPTION = "The {bot_name} bot has created a version of this dataset in the Parquet format in the {parquet_link} branch.\n\n## What is Parquet?\n\nApache Parquet is a popular columnar storage format known for:\n\n- reduced memory requirement,\n- fast data retrieval and filtering,\n- efficient storage.\n\n**This is what powers the dataset viewer** on each dataset page and every dataset on the Hub can be accessed with the same code (you can use HF Datasets, ClickHouse, DuckDB, Pandas or Polars, [up to you](https://huggingface.co/docs/dataset-viewer/parquet_process)).\n\nYou can learn more about the advantages associated with Parquet in the [documentation](https://huggingface.co/docs/dataset-viewer/parquet).\n\n## How to access the Parquet version of the dataset?\n\nYou can access the Parquet version of the dataset by following this link: {parquet_link}\n\n## What if my dataset was already in Parquet?\n\nWhen the dataset is already in Parquet format, the data are not converted and the files in `refs/convert/parquet` are links to the original files. This rule has an exception to ensure the dataset viewer API to stay fast: if the row group size of the original Parquet files is too big, new Parquet files are generated.\n\n## What should I do?\n\nYou don't need to do anything. The Parquet version of the dataset is available for you to use. Refer to the [documentation](https://huggingface.co/docs/dataset-viewer/parquet_process) for examples and code snippets on how to query the Parquet files with ClickHouse, DuckDB, Pandas or Polars.\n\nIf you have any questions or concerns, feel free to ask in the discussion below. You can also close the discussion if you don't have any questions." @dataclass class ParquetCounters: datasets: int = 0 new_discussions: int = 0 dismissed_discussions: int = 0 errors: int = 0 @dataclass class Counters: parquet: ParquetCounters def post_messages(hf_endpoint: str, bot_associated_user_name: Optional[str], bot_token: Optional[str], parquet_revision: str) -> Counters: if not (bot_associated_user_name and bot_token): raise Exception('No bot token or user name provided, skipping posting messages.') return Counters(parquet=post_messages_on_parquet_conversion(hf_endpoint=hf_endpoint, bot_associated_user_name=bot_associated_user_name, bot_token=bot_token, parquet_revision=parquet_revision)) def post_messages_on_parquet_conversion(hf_endpoint: str, bot_associated_user_name: str, bot_token: str, parquet_revision: str) -> ParquetCounters: logging.info('Create a Hub discussion to notify about parquet conversion') datasets = limit_to_one_dataset_per_namespace(get_datasets_with_last_updated_kind(kind=PARQUET_CACHE_KIND, days=DAYS)) logging.info(f'Creating discussions for {len(datasets)} datasets') log_batch = 100 counters = ParquetCounters() def get_log() -> str: return f' [{counters.datasets}/{len(datasets)}] {counters.new_discussions} discussions have been opened, {counters.dismissed_discussions} datasets already had a discussion (open or closed). {counters.errors} errors.' 
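# Note on the loop that follows: hf_api.get_repo_discussions() returns a lazy iterator,
# so wrapping next() in try/except StopIteration is used purely as an existence check --
# StopIteration means the bot has not opened any discussion on that dataset yet.
# A minimal sketch of the same pattern (the repo id here is illustrative only):
#
#     from huggingface_hub import HfApi
#
#     api = HfApi()
#     discussions = api.get_repo_discussions(repo_id="some-user/some-dataset", repo_type="dataset")
#     try:
#         first_discussion = next(discussions)  # any existing discussion
#     except StopIteration:
#         first_discussion = None  # none found: safe to open a new one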
hf_api = HfApi(endpoint=hf_endpoint, token=bot_token) for dataset in datasets: counters.datasets += 1 prefix = f'[{counters.datasets}/{len(datasets)}]' logging.info(f'{prefix} Processing dataset {dataset}') try: try: next(hf_api.get_repo_discussions(repo_id=dataset, repo_type=REPO_TYPE_DATASET, token=bot_token, author=bot_associated_user_name)) counters.dismissed_discussions += 1 logging.info(f'{prefix} [dismissed] Dataset {dataset} already has a discussion, skipping') except StopIteration: hf_api.create_discussion(repo_id=dataset, repo_type=REPO_TYPE_DATASET, title='[bot] Conversion to Parquet', description=create_discussion_description(dataset=dataset, hf_endpoint=hf_endpoint, parquet_revision=parquet_revision, bot_associated_user_name=bot_associated_user_name), token=bot_token) counters.new_discussions += 1 logging.info(f'{prefix} [new] Dataset {dataset} has a new discussion') except Exception as e: counters.errors += 1 logging.warning(f'{prefix} [error] Failed to process dataset {dataset}: {e}') logging.debug(get_log()) if counters.datasets % log_batch == 0: logging.info(get_log()) logging.info(get_log()) logging.info('All the messages about parquet conversion have been posted.') return counters def create_discussion_description(dataset: str, hf_endpoint: str, parquet_revision: str, bot_associated_user_name: str) -> str: parquet_link = create_link(text=parquet_revision, dataset=dataset, hf_endpoint=hf_endpoint, revision_type='tree', revision=parquet_revision) return DISCUSSION_DESCRIPTION.format(bot_name=bot_associated_user_name, parquet_link=parquet_link) def create_link(text: str, dataset: str, hf_endpoint: str, revision_type: Literal['commit', 'tree'], revision: str) -> str: return f"[`{text}`]({hf_endpoint}/datasets/{dataset}/{revision_type}/{parse.quote(revision, safe='')})" def limit_to_one_dataset_per_namespace(datasets: list[str]) -> list[str]: namespaces: set[str] = set() selected_datasets: list[str] = [] for dataset in datasets: namespace = get_namespace(dataset) if namespace is None or namespace in namespaces: continue namespaces.add(namespace) selected_datasets.append(dataset) return selected_datasets def get_namespace(dataset: str) -> Optional[str]: splits = dataset.split('/') return splits[0] if len(splits) == 2 else None # File: dataset-viewer-main/jobs/cache_maintenance/src/cache_maintenance/main.py import logging import sys from datetime import datetime from libcommon.log import init_logging from libcommon.resources import CacheMongoResource, QueueMongoResource from libcommon.storage_client import StorageClient from cache_maintenance.backfill import backfill_all_datasets, backfill_retryable_errors from cache_maintenance.cache_metrics import collect_cache_metrics from cache_maintenance.config import JobConfig from cache_maintenance.discussions import post_messages from cache_maintenance.queue_metrics import collect_queue_metrics, collect_worker_size_jobs_count def run_job() -> None: job_config = JobConfig.from_env() action = job_config.action if not action: logging.warning('No action mode was selected, skipping tasks.') return init_logging(level=job_config.log.level) with CacheMongoResource(database=job_config.cache.mongo_database, host=job_config.cache.mongo_url) as cache_resource, QueueMongoResource(database=job_config.queue.mongo_database, host=job_config.queue.mongo_url) as queue_resource: start_time = datetime.now() if action in ('backfill', 'backfill-retryable-errors'): if not cache_resource.is_available(): logging.warning('The connection to the cache database 
could not be established. The action is skipped.') return if not queue_resource.is_available(): logging.warning('The connection to the queue database could not be established. The action is skipped.') return cached_assets_storage_client = StorageClient(protocol=job_config.cached_assets.storage_protocol, storage_root=job_config.cached_assets.storage_root, base_url=job_config.cached_assets.base_url, s3_config=job_config.s3) assets_storage_client = StorageClient(protocol=job_config.assets.storage_protocol, storage_root=job_config.assets.storage_root, base_url=job_config.assets.base_url, s3_config=job_config.s3) if action == 'backfill': backfill_all_datasets(hf_endpoint=job_config.common.hf_endpoint, hf_token=job_config.common.hf_token, blocked_datasets=job_config.common.blocked_datasets, storage_clients=[cached_assets_storage_client, assets_storage_client]) else: backfill_retryable_errors(hf_endpoint=job_config.common.hf_endpoint, hf_token=job_config.common.hf_token, blocked_datasets=job_config.common.blocked_datasets, storage_clients=[cached_assets_storage_client, assets_storage_client]) elif action == 'collect-queue-metrics': if not queue_resource.is_available(): logging.warning('The connection to the queue database could not be established. The action is skipped.') return collect_queue_metrics() collect_worker_size_jobs_count() elif action == 'collect-cache-metrics': if not cache_resource.is_available(): logging.warning('The connection to the cache database could not be established. The action is skipped.') return collect_cache_metrics() elif action == 'post-messages': if not cache_resource.is_available(): logging.warning('The connection to the cache database could not be established. The action is skipped.') return post_messages(hf_endpoint=job_config.common.hf_endpoint, bot_associated_user_name=job_config.discussions.bot_associated_user_name, bot_token=job_config.discussions.bot_token, parquet_revision=job_config.discussions.parquet_revision) elif action == 'skip': pass else: logging.warning(f"Action '{action}' is not supported.") end_time = datetime.now() logging.info(f'Duration: {end_time - start_time}') if __name__ == '__main__': try: run_job() sys.exit(0) except Exception as e: logging.exception(e) sys.exit(1) # File: dataset-viewer-main/jobs/cache_maintenance/src/cache_maintenance/queue_metrics.py import logging from libcommon.queue.jobs import Queue from libcommon.queue.metrics import JobTotalMetricDocument, WorkerSizeJobsCountDocument def collect_queue_metrics() -> None: logging.info('updating queue metrics') new_metric_by_id = Queue().get_jobs_total_by_type_status_and_dataset_status() new_ids = set(new_metric_by_id.keys()) old_ids = set(((metric.job_type, metric.status, metric.dataset_status) for metric in JobTotalMetricDocument.objects())) to_delete = old_ids - new_ids for (job_type, status, dataset_status) in to_delete: JobTotalMetricDocument.objects(job_type=job_type, status=status, dataset_status=dataset_status).delete() logging.info(f'job_type={job_type!r} status={status!r} dataset_status={dataset_status!r}: has been deleted') for ((job_type, status, dataset_status), total) in new_metric_by_id.items(): JobTotalMetricDocument.objects(job_type=job_type, status=status, dataset_status=dataset_status).upsert_one(total=total) logging.info(f'job_type={job_type!r} status={status!r} dataset_status={dataset_status!r}: total={total!r} has been inserted') logging.info('queue metrics have been updated') def collect_worker_size_jobs_count() -> None: logging.info('updating 
worker_size_jobs_count metrics') new_metric_by_worker_size = Queue().get_jobs_count_by_worker_size() new_ids = set((worker_size for worker_size in new_metric_by_worker_size.keys())) old_ids = set((metric.worker_size.value for metric in WorkerSizeJobsCountDocument.objects())) to_delete = old_ids - new_ids for worker_size in to_delete: WorkerSizeJobsCountDocument.objects(worker_size=worker_size).delete() logging.info(f'worker_size={worker_size!r} has been deleted') for (worker_size, jobs_count) in new_metric_by_worker_size.items(): WorkerSizeJobsCountDocument.objects(worker_size=worker_size).upsert_one(jobs_count=jobs_count) logging.info(f'worker_size={worker_size!r}: jobs_count={jobs_count!r} has been inserted') logging.info('worker_size_jobs_count metrics have been updated') # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/check.py import logging from collections.abc import Callable, Iterator from typing import Optional, TypeVar from mongoengine import Document from pymongo.collection import Collection U = TypeVar('U', bound=Document) DocumentClass = type[U] CustomValidation = Callable[[U], None] def get_random_oids(collection: Collection, sample_size: int) -> list[int]: pipeline = [{'$project': {'_id': 1}}, {'$sample': {'size': sample_size}}] return [s['_id'] for s in collection.aggregate(pipeline)] def get_random_documents(DocCls: DocumentClass[Document], sample_size: int) -> Iterator[Document]: doc_collection = DocCls._get_collection() random_oids = get_random_oids(doc_collection, sample_size) return DocCls.objects(pk__in=random_oids) def check_documents(DocCls: DocumentClass[Document], sample_size: int, custom_validation: Optional[CustomValidation[Document]]=None) -> None: for doc in get_random_documents(DocCls, sample_size): try: doc.validate() for field in doc._fields: try: getattr(doc, field) except Exception: logging.error(f'Could not load field {field} in Document {doc.pk}. Document: {doc.to_json()}') raise if custom_validation is not None: custom_validation(doc) except Exception as e: logging.error(f'Validation error on document {doc.pk}: {e}. 
Document: {doc.to_json()}') raise e # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/collector.py from libcommon.constants import CACHE_METRICS_COLLECTION, QUEUE_COLLECTION_DATASET_BLOCKAGES, QUEUE_COLLECTION_PAST_JOBS, QUEUE_MONGOENGINE_ALIAS, TYPE_STATUS_AND_DATASET_STATUS_JOB_COUNTS_COLLECTION from mongodb_migration.deletion_migrations import CacheDeletionMigration, MetricsDeletionMigration, MigrationDeleteIndex, MigrationDeleteJobsByStatus, MigrationQueueDeleteTTLIndex, MigrationRemoveFieldFromCache, MigrationRemoveFieldFromJob, QueueDeletionMigration from mongodb_migration.drop_migrations import MigrationDropCollection from mongodb_migration.migration import Migration from mongodb_migration.migrations._20221110230400_example import MigrationExample from mongodb_migration.migrations._20221116133500_queue_job_add_force import MigrationAddForceToJob from mongodb_migration.migrations._20221117223000_cache_generic_response import MigrationMoveToGenericCachedResponse from mongodb_migration.migrations._20230126164900_queue_job_add_priority import MigrationAddPriorityToJob from mongodb_migration.migrations._20230309123100_cache_add_progress import MigrationAddProgressToCacheResponse from mongodb_migration.migrations._20230309141600_cache_add_job_runner_version import MigrationAddJobRunnerVersionToCacheResponse from mongodb_migration.migrations._20230511100700_queue_delete_indexes_with_force import MigrationQueueDeleteIndexesWithForce from mongodb_migration.migrations._20230516101500_queue_job_add_revision import MigrationQueueAddRevisionToJob from mongodb_migration.migrations._20230516101600_queue_delete_index_without_revision import MigrationQueueDeleteIndexWithoutRevision from mongodb_migration.migrations._20230622131500_lock_add_owner import MigrationAddOwnerToQueueLock from mongodb_migration.migrations._20230703110100_cache_add_partial_field_in_config_parquet_and_info import MigrationAddPartialToCacheResponse from mongodb_migration.migrations._20230705160600_queue_job_add_difficulty import MigrationQueueAddDifficultyToJob from mongodb_migration.migrations._20230926095900_cache_add_has_fts_field_in_split_duckdb_index import MigrationAddHasFTSToSplitDuckdbIndexCacheResponse from mongodb_migration.migrations._20231106193200_cache_add_partial_field_in_split_duckdb_index import MigrationAddPartialToSplitDuckdbIndexCacheResponse from mongodb_migration.migrations._20240104085000_cache_add_retries import MigrationAddRetriesToCacheResponse from mongodb_migration.migrations._20240109160700_cache_add_failed_runs import MigrationAddFailedRunsToCacheResponse from mongodb_migration.migrations._20240112164500_cache_add_partial_field_in_split_descriptive_statistics import MigrationAddPartialToSplitDescriptiveStatisticsCacheResponse from mongodb_migration.migrations._20240206153000_cache_add_tags_in_hub_cache import MigrationAddTagsToHubCacheCacheResponse from mongodb_migration.migrations._20240221103200_cache_merge_config_split_names import MigrationMergeConfigSplitNamesResponses from mongodb_migration.migrations._20240221160700_cache_merge_split_first_rows import MigrationMergeSplitFirstRowsResponses from mongodb_migration.migrations._20240221160800_cache_set_updated_at_to_root_step import MigrationSetUpdatedAtToOldestStep from mongodb_migration.migrations._20240619124500_cache_add_estimated_dataset_info_field_parquet_and_info import MigrationAddEstimatedDatasetInfoToParquetAndInfoCacheResponse from 
mongodb_migration.migrations._20240624144000_cache_add_estimated_num_rows_field_in_size import MigrationAddEstimatedNumRowsToSizeCacheResponse from mongodb_migration.migrations._20240626095000_cache_add_stemmer_in_split_duckdb_index import MigrationAddStemmerToSplitDuckdbIndexCacheResponse from mongodb_migration.migrations._20240626151600_cache_remove_has_fts_field_in_split_duckdb_index import MigrationRemoveHasFTSFromSplitDuckdbIndexCacheResponse from mongodb_migration.migrations._20240703160300_cache_add_duration import MigrationAddDurationToCacheResponse from mongodb_migration.migrations._20240731143600_queue_add_dataset_status_to_queue_metrics import MigrationAddDatasetStatusToQueueMetrics from mongodb_migration.renaming_migrations import CacheRenamingMigration, QueueRenamingMigration class MigrationsCollector: def get_migrations(self) -> list[Migration]: return [MigrationExample(version='20221110230400', description='example'), MigrationAddForceToJob(version='20221116133500', description="add 'force' field to jobs in queue database"), MigrationMoveToGenericCachedResponse(version='20221117223000', description='replace SplitsResponse and FirstRowsResponse with a generic CachedResponse'), MigrationAddPriorityToJob(version='20230126164900', description="add 'priority' field to jobs in queue database"), CacheRenamingMigration(cache_kind='/split-names', new_cache_kind='/split-names-from-streaming', version='20230216112500'), QueueRenamingMigration(job_type='/split-names', new_job_type='/split-names-from-streaming', version='20230216141000'), MigrationAddProgressToCacheResponse(version='20230309123100', description="add the 'progress' field with the default value (1.0) to the cached results"), MigrationAddJobRunnerVersionToCacheResponse(version='20230309141600', description="add 'job_runner_version' field based on 'worker_version' value"), MigrationRemoveFieldFromCache(version='20230313164200', description="remove 'worker_version' field from cache", field_name='worker_version'), CacheRenamingMigration(cache_kind='/first-rows', new_cache_kind='split-first-rows-from-streaming', version='20230320163700'), QueueRenamingMigration(job_type='/first-rows', new_job_type='split-first-rows-from-streaming', version='20230320165700'), CacheRenamingMigration(cache_kind='/dataset-info', new_cache_kind='dataset-info', version='20230323155000'), QueueRenamingMigration(job_type='/dataset-info', new_job_type='dataset-info', version='20230323160000'), QueueDeletionMigration(job_type='/splits', version='20230407091400'), CacheDeletionMigration(cache_kind='/splits', version='20230407091500'), QueueDeletionMigration(job_type='/parquet-and-dataset-info', version='20230424173000'), CacheDeletionMigration(cache_kind='/parquet-and-dataset-info', version='20230424174000'), MetricsDeletionMigration(job_type='/parquet-and-dataset-info', cache_kind='/parquet-and-dataset-info', version='20230427121500'), MigrationQueueDeleteTTLIndex(version='20230428145000', description="delete the TTL index on the 'finished_at' field in the queue database", field_name='finished_at'), CacheDeletionMigration(cache_kind='dataset-split-names-from-streaming', version='20230428175100'), QueueDeletionMigration(job_type='dataset-split-names-from-streaming', version='20230428181800'), MetricsDeletionMigration(job_type='dataset-split-names-from-streaming', cache_kind='dataset-split-names-from-streaming', version='20230428193100'), CacheDeletionMigration(cache_kind='dataset-split-names-from-dataset-info', version='20230504185100'), 
QueueDeletionMigration(job_type='dataset-split-names-from-dataset-info', version='20230504192200'), MetricsDeletionMigration(job_type='dataset-split-names-from-dataset-info', cache_kind='dataset-split-names-from-dataset-info', version='20230504194600'), MigrationRemoveFieldFromJob(field_name='force', version='20230511100600', description="remove 'force' field from queue"), MigrationQueueDeleteIndexesWithForce(version='20230511100700', description="remove indexes with field 'force'"), MigrationDeleteJobsByStatus(status_list=['skipped'], version='20230511110700', description='delete jobs with skipped status'), MigrationQueueAddRevisionToJob(version='20230516101500', description="add 'revision' field to jobs in queue database"), MigrationQueueDeleteIndexWithoutRevision(version='20230516101600', description='remove index without revision'), CacheRenamingMigration(cache_kind='/split-names-from-streaming', new_cache_kind='config-split-names-from-streaming', version='20230516164500'), QueueRenamingMigration(job_type='/split-names-from-streaming', new_job_type='config-split-names-from-streaming', version='20230516164700'), MetricsDeletionMigration(job_type='/split-names-from-streaming', cache_kind='/split-names-from-streaming', version='20230522094400'), MigrationQueueDeleteTTLIndex(version='20230523171700', description="delete the TTL index on the 'finished_at' field in the queue database to update its TTL value", field_name='finished_at'), CacheRenamingMigration(cache_kind='/split-names-from-dataset-info', new_cache_kind='config-split-names-from-info', version='20230524095900'), QueueRenamingMigration(job_type='/split-names-from-dataset-info', new_job_type='config-split-names-from-info', version='20230524095901'), MetricsDeletionMigration(job_type='/split-names-from-dataset-info', cache_kind='/split-names-from-dataset-info', version='20230524095902'), CacheRenamingMigration(cache_kind='/config-names', new_cache_kind='dataset-config-names', version='20230524192200'), QueueRenamingMigration(job_type='/config-names', new_job_type='dataset-config-names', version='20230524192300'), MetricsDeletionMigration(job_type='/config-names', cache_kind='/config-names', version='20230524192400'), MigrationQueueDeleteTTLIndex(version='20230607154800', description="delete the TTL index on the 'finished_at' field in the queue database to update its TTL condition", field_name='finished_at'), MigrationQueueDeleteTTLIndex(version='202306201100', description="delete the TTL index on the 'finished_at' field in the queue database to update its TTL condition", field_name='finished_at'), MigrationAddOwnerToQueueLock(version='20230622131800', description="add 'owner' field copying the job_id value"), MigrationAddPartialToCacheResponse(version='20230703110100', description="add 'partial' field to config-parquet-and-info"), MigrationQueueAddDifficultyToJob(version='20230705160600', description="add 'difficulty' field to jobs"), MigrationDropCollection(version='20230811063600', description='drop cache metrics collection', alias='metrics', collection_name=CACHE_METRICS_COLLECTION), MigrationDropCollection(version='20230814121400', description='drop queue metrics collection', alias='metrics', collection_name=TYPE_STATUS_AND_DATASET_STATUS_JOB_COUNTS_COLLECTION), MigrationAddHasFTSToSplitDuckdbIndexCacheResponse(version='20230926095900', description="add 'has_fts' field for 'split-duckdb-index' cache records"), MigrationAddPartialToSplitDuckdbIndexCacheResponse(version='20231106193200', description="add 'partial', 'num_rows' and 
'num_bytes' fields for 'split-duckdb-index' cache records"), MigrationDeleteJobsByStatus(status_list=['success', 'error', 'cancelled'], version='20231201074900', description='delete jobs with success, error and cancelled status'), MigrationQueueDeleteTTLIndex(version='20231201112000', description="delete the TTL index on the 'finished_at' field in the queue database", field_name='finished_at'), MigrationRemoveFieldFromJob(field_name='finished_at', version='20231201112600', description="remove 'finished_at' field from queue"), MigrationAddRetriesToCacheResponse(version='20240104085000', description="add 'retries' field to cache records"), MigrationRemoveFieldFromCache(version='20240109155600', description="remove 'retries' field from cache", field_name='retries'), MigrationAddFailedRunsToCacheResponse(version='20240109160700', description="add 'failed_runs' filed to cache records"), MigrationAddTagsToHubCacheCacheResponse(version='20240206153000', description="add the 'tags' fields to dataset-hub-cache"), MigrationAddPartialToSplitDescriptiveStatisticsCacheResponse(version='20240216111500', description="add 'partial' field to split-descriptive-statistics cache records"), MigrationMergeConfigSplitNamesResponses(version='20240221103200', description="merge 'config-split-names-from-streaming' and 'config-split-names-from-info' responses to 'config-split-names'"), MigrationMergeSplitFirstRowsResponses(version='20240221160700', description="merge 'split-first-rows-from-streaming' and 'split-first-rows-from-parquet' responses to 'split-first-rows'"), MigrationSetUpdatedAtToOldestStep(version='20240221160800', description="set 'updated_at' of the root step to all the cache entries for each dataset"), CacheDeletionMigration(version='20240223090800', cache_kind='split-duckdb-index'), CacheRenamingMigration(version='20240223090900', cache_kind='split-duckdb-index-010', new_cache_kind='split-duckdb-index'), QueueRenamingMigration(version='20240223091000', job_type='split-duckdb-index-010', new_job_type='split-duckdb-index'), QueueRenamingMigration(version='20240307191001', job_type='dataset-loading-tags', new_job_type='dataset-compatible-libraries'), CacheRenamingMigration(cache_kind='dataset-loading-tags', new_cache_kind='dataset-compatible-libraries', version='20240307191000'), MigrationAddEstimatedDatasetInfoToParquetAndInfoCacheResponse(version='20240619124500', description="add 'estimated_dataset_info' field to config-parquet-and-info cache records"), MigrationDeleteIndex(version='20240624120300', description='delete the TTL index in the pastJobs collection', database=QUEUE_MONGOENGINE_ALIAS, collection=QUEUE_COLLECTION_PAST_JOBS, index_name='PAST_JOB_EXPIRE_AFTER_SECONDS'), MigrationDeleteIndex(version='20240624120301', description='delete the TTL index in the blockedDatasets collection', database=QUEUE_MONGOENGINE_ALIAS, collection=QUEUE_COLLECTION_DATASET_BLOCKAGES, index_name='DATASET_BLOCKAGE_EXPIRE_AFTER_SECONDS'), MigrationAddEstimatedNumRowsToSizeCacheResponse(version='20240624144000', description="add 'estimated_num_rows' field to config-size and dataset-size cache records"), MigrationAddStemmerToSplitDuckdbIndexCacheResponse(version='20240626095000', description="add 'stemmer' field for 'split-duckdb-index' cache records"), MigrationRemoveHasFTSFromSplitDuckdbIndexCacheResponse(version='20240626151600', description="remove 'has_fts' field from 'split-duckdb-index' cache records"), MigrationAddDurationToCacheResponse(version='20240703160300', description="add 'duration' field to cache 
records"), MigrationAddDatasetStatusToQueueMetrics(version='20240731143600', description="add 'dataset_status' field to the jobs metrics")] # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/config.py from dataclasses import dataclass, field from environs import Env from libcommon.config import CacheConfig, LogConfig, QueueConfig DATABASE_MIGRATIONS_MONGO_DATABASE = 'dataset_viewer_maintenance' DATABASE_MIGRATIONS_MONGO_URL = 'mongodb://localhost:27017' @dataclass(frozen=True) class DatabaseMigrationsConfig: mongo_database: str = DATABASE_MIGRATIONS_MONGO_DATABASE mongo_url: str = DATABASE_MIGRATIONS_MONGO_URL @classmethod def from_env(cls) -> 'DatabaseMigrationsConfig': env = Env(expand_vars=True) with env.prefixed('DATABASE_MIGRATIONS_'): return cls(mongo_database=env.str(name='MONGO_DATABASE', default=DATABASE_MIGRATIONS_MONGO_DATABASE), mongo_url=env.str(name='MONGO_URL', default=DATABASE_MIGRATIONS_MONGO_URL)) @dataclass(frozen=True) class JobConfig: cache: CacheConfig = field(default_factory=CacheConfig) log: LogConfig = field(default_factory=LogConfig) database_migrations: DatabaseMigrationsConfig = field(default_factory=DatabaseMigrationsConfig) queue: QueueConfig = field(default_factory=QueueConfig) @classmethod def from_env(cls) -> 'JobConfig': return cls(log=LogConfig.from_env(), cache=CacheConfig.from_env(), database_migrations=DatabaseMigrationsConfig.from_env(), queue=QueueConfig.from_env()) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/database_migrations.py import types from typing import Generic, TypeVar from mongoengine import Document from mongoengine.fields import StringField from mongoengine.queryset.queryset import QuerySet from mongodb_migration.constants import DATABASE_MIGRATIONS_COLLECTION_MIGRATIONS, DATABASE_MIGRATIONS_MONGOENGINE_ALIAS U = TypeVar('U', bound=Document) def no_op(self, _): return self QuerySet.__class_getitem__ = types.MethodType(no_op, QuerySet) class QuerySetManager(Generic[U]): def __get__(self, instance: object, cls: type[U]) -> QuerySet[U]: return QuerySet(cls, cls._get_collection()) class DatabaseMigration(Document): meta = {'collection': DATABASE_MIGRATIONS_COLLECTION_MIGRATIONS, 'db_alias': DATABASE_MIGRATIONS_MONGOENGINE_ALIAS} version = StringField(required=True) description = StringField(required=True) objects = QuerySetManager['DatabaseMigration']() def _clean_maintenance_database() -> None: DatabaseMigration.drop_collection() # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/deletion_migrations.py import logging from collections.abc import Mapping from typing import Any, Optional from libcommon.queue.jobs import JobDocument from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import BaseCacheMigration, BaseQueueMigration, CacheMigration, IrreversibleMigrationError, Migration, QueueMigration class MetricsDeletionMigration(Migration): def __init__(self, job_type: str, cache_kind: str, version: str, description: Optional[str]=None): if not description: description = f"[deprecated] no-op migration for job type '{job_type}' and cache kind '{cache_kind}'" super().__init__(version=version, description=description) def up(self) -> None: logging.info('[deprecated] no-op') def down(self) -> None: logging.info('[deprecated] no-op') def validate(self) -> None: logging.info('[deprecated] no-op') class CacheDeletionMigration(CacheMigration): def 
__init__(self, cache_kind: str, version: str, description: Optional[str]=None): if not description: description = f"delete the cache entries of kind '{cache_kind}'" super().__init__(cache_kind=cache_kind, version=version, description=description) def up(self) -> None: logging.info(f'Delete cache entries of kind {self.cache_kind}') db = get_db(self.MONGOENGINE_ALIAS) result = db[self.COLLECTION_RESPONSES].delete_many({'kind': self.cache_kind}) logging.info(f'{result.deleted_count} deleted cache entries') def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info(f'Check that none of the documents has the {self.cache_kind} kind') db = get_db(self.MONGOENGINE_ALIAS) if db[self.COLLECTION_RESPONSES].count_documents({'kind': self.cache_kind}): raise ValueError(f'Found documents with kind {self.cache_kind}') class QueueDeletionMigration(QueueMigration): def __init__(self, job_type: str, version: str, description: Optional[str]=None): if not description: description = f"delete the jobs of type '{job_type}'" super().__init__(job_type=job_type, version=version, description=description) def up(self) -> None: logging.info(f'Delete jobs of type {self.job_type}') db = get_db(self.MONGOENGINE_ALIAS) result = db[self.COLLECTION_JOBS].delete_many({'type': self.job_type}) logging.info(f'{result.deleted_count} deleted jobs') def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info(f'Check that none of the documents has the {self.job_type} type') db = get_db(self.MONGOENGINE_ALIAS) if db[self.COLLECTION_JOBS].count_documents({'type': self.job_type}): raise ValueError(f'Found documents with type {self.job_type}') def get_index_names(index_information: Mapping[str, Any], field_name: str) -> list[str]: return [name for (name, value) in index_information.items() if isinstance(value, dict) and 'expireAfterSeconds' in value and ('key' in value) and (value['key'] == [(field_name, 1)])] class MigrationQueueDeleteTTLIndex(BaseQueueMigration): def __init__(self, version: str, description: str, field_name: str): super().__init__(version=version, description=description) self.field_name = field_name def up(self) -> None: logging.info(f'Delete ttl index on field {self.field_name}. 
Mongoengine will create it again with a different TTL parameter') db = get_db(self.MONGOENGINE_ALIAS) collection = db[self.COLLECTION_JOBS] ttl_index_names = get_index_names(index_information=collection.index_information(), field_name=self.field_name) if len(ttl_index_names) != 1: raise ValueError(f'Expected 1 ttl index on field {self.field_name}, found {len(ttl_index_names)}') collection.drop_index(ttl_index_names[0]) def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info('Check that the index does not exists anymore') db = get_db(self.MONGOENGINE_ALIAS) collection = db[self.COLLECTION_JOBS] ttl_index_names = get_index_names(index_information=collection.index_information(), field_name=self.field_name) if len(ttl_index_names) > 0: raise ValueError(f'Found TTL index for field {self.field_name}') class MigrationDeleteIndex(Migration): def __init__(self, version: str, description: str, database: str, collection: str, index_name: str): super().__init__(version=version, description=description) self.database = database self.collection = collection self.index_name = index_name def up(self) -> None: logging.info(f'Delete ttl index {self.index_name}.') db = get_db(self.database) collection = db[self.collection] collection.drop_index(self.index_name) def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info('Check that the index does not exists anymore') db = get_db(self.database) collection = db[self.collection] if self.index_name in collection.index_information(): raise ValueError(f'Index still exists: {self.index_name}') class MigrationDeleteJobsByStatus(BaseQueueMigration): def __init__(self, status_list: list[str], version: str, description: str): super().__init__(version=version, description=description) self.status_list = status_list def up(self) -> None: logging.info(f'Delete jobs with status {self.status_list}.') db = get_db(self.MONGOENGINE_ALIAS) db[self.COLLECTION_JOBS].delete_many({'status': {'$in': self.status_list}}) def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info('Check that jobs with status list dont exist') db = get_db(self.MONGOENGINE_ALIAS) if db[self.COLLECTION_JOBS].count_documents({'status': {'$in': self.status_list}}): raise ValueError(f'Found documents with status in {self.status_list}') class MigrationRemoveFieldFromJob(BaseQueueMigration): def __init__(self, field_name: str, version: str, description: str): super().__init__(version=version, description=description) self.field_name = field_name def up(self) -> None: logging.info(f"Removing '{self.field_name}' field.") db = get_db(self.MONGOENGINE_ALIAS) db[self.COLLECTION_JOBS].update_many({}, {'$unset': {self.field_name: ''}}) def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info(f"Ensure that a random selection of jobs don't have '{self.field_name}' field") check_documents(DocCls=JobDocument, sample_size=10) class MigrationRemoveFieldFromCache(BaseCacheMigration): def __init__(self, field_name: str, version: str, description: Optional[str]=None): if not description: description = f"remove '{field_name}' field from cache" super().__init__(version=version, description=description) self.field_name = field_name def up(self) -> None: logging.info(f"Removing '{self.field_name}' 
field.") db = get_db(self.MONGOENGINE_ALIAS) db[self.COLLECTION_RESPONSES].update_many({}, {'$unset': {self.field_name: ''}}) def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info(f"Ensure that a random selection of documents don't have '{self.field_name}' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/drop_migrations.py import logging from mongoengine.connection import get_db from mongodb_migration.migration import IrreversibleMigrationError, Migration class MigrationDropCollection(Migration): def __init__(self, version: str, description: str, collection_name: str, alias: str): super().__init__(version=version, description=description) self.collection_name = collection_name self.alias = alias def up(self) -> None: logging.info(f'drop {self.collection_name} collection from {self.alias}') db = get_db(self.alias) db[self.collection_name].drop() def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info('check that collection does not exist') db = get_db(self.alias) collections = db.list_collection_names() if self.collection_name in collections: raise ValueError(f'found collection with name {self.collection_name}') # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/main.py import logging import sys from libcommon.log import init_logging from libcommon.resources import CacheMongoResource, QueueMongoResource from mongodb_migration.collector import MigrationsCollector from mongodb_migration.config import JobConfig from mongodb_migration.plan import Plan from mongodb_migration.resources import MigrationsMongoResource def run_job() -> None: job_config = JobConfig.from_env() init_logging(level=job_config.log.level) with CacheMongoResource(database=job_config.cache.mongo_database, host=job_config.cache.mongo_url) as cache_resource, QueueMongoResource(database=job_config.queue.mongo_database, host=job_config.queue.mongo_url) as queue_resource, MigrationsMongoResource(database=job_config.database_migrations.mongo_database, host=job_config.database_migrations.mongo_url) as migrations_database_resource: if not cache_resource.is_available(): logging.warning('The connection to the cache database could not be established. The migration job is skipped.') return if not queue_resource.is_available(): logging.warning('The connection to the queue database could not be established. The migration job is skipped.') return if not migrations_database_resource.is_available(): logging.warning('The connection to the migrations database could not be established. 
The migration job is skipped.') return collected_migrations = MigrationsCollector().get_migrations() Plan(collected_migrations=collected_migrations).execute() if __name__ == '__main__': try: run_job() sys.exit(0) except Exception as e: logging.error(e) sys.exit(1) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migration.py import datetime from abc import ABC, abstractmethod from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS, QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS class IrreversibleMigrationError(Exception): pass class Migration(ABC): def __init__(self, version: str, description: str): if version is None or description is None: raise ValueError('The version and the description are required.') try: datetime.datetime.strptime(version, '%Y%m%d%H%M%S') except Exception as e: raise ValueError('The version should be a string representing a date in the format YYYYMMDDHHMMSS') from e self.version = version self.description = description @abstractmethod def up(self) -> None: raise NotImplementedError() @abstractmethod def validate(self) -> None: raise NotImplementedError() @abstractmethod def down(self) -> None: raise IrreversibleMigrationError() class BaseQueueMigration(Migration): MONGOENGINE_ALIAS: str = QUEUE_MONGOENGINE_ALIAS COLLECTION_JOBS: str = QUEUE_COLLECTION_JOBS def __init__(self, version: str, description: str): super().__init__(version=version, description=description) class QueueMigration(BaseQueueMigration): def __init__(self, job_type: str, version: str, description: str): self.job_type = job_type super().__init__(version=version, description=description) class BaseCacheMigration(Migration): MONGOENGINE_ALIAS: str = CACHE_MONGOENGINE_ALIAS COLLECTION_RESPONSES: str = CACHE_COLLECTION_RESPONSES def __init__(self, version: str, description: str): super().__init__(version=version, description=description) class CacheMigration(BaseCacheMigration): def __init__(self, cache_kind: str, version: str, description: str): self.cache_kind = cache_kind super().__init__(version=version, description=description) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20221116133500_queue_job_add_force.py import logging from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS from mongoengine.connection import get_db from mongodb_migration.migration import Migration class MigrationAddForceToJob(Migration): def up(self) -> None: logging.info('If missing, add the force field with the default value (False) to the jobs') db = get_db(QUEUE_MONGOENGINE_ALIAS) db[QUEUE_COLLECTION_JOBS].update_many({'force': {'$exists': False}}, {'$set': {'force': False}}) def down(self) -> None: logging.info('Remove the force field from all the jobs') db = get_db(QUEUE_MONGOENGINE_ALIAS) db[QUEUE_COLLECTION_JOBS].update_many({}, {'$unset': {'force': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of jobs have the 'force' field") # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20221117223000_cache_generic_response.py import contextlib import logging from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from pymongo.errors import InvalidName from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration db_name = 'cache' splitsResponseCollection = 'splitsResponse' firstRowsResponseCollection = 'firstRowsResponse' cachedResponseCollection = 
'cachedResponsesBlue' SPLITS_KIND = '/splits' FIRST_ROWS_KIND = '/first-rows' class MigrationMoveToGenericCachedResponse(Migration): def up(self) -> None: logging.info(f'Create the {cachedResponseCollection} collection, and fill it with the data from splits and first-rows') db = get_db(db_name) with contextlib.suppress(InvalidName): for splits_response in db[splitsResponseCollection].find(): if not isinstance(splits_response, dict): raise ValueError('splits_response should be a dict') db[cachedResponseCollection].insert_one({'_id': splits_response.get('_id'), 'kind': SPLITS_KIND, 'dataset': splits_response.get('dataset_name'), 'config': None, 'split': None, 'http_status': splits_response.get('http_status'), 'error_code': splits_response.get('error_code'), 'content': splits_response.get('response'), 'worker_version': splits_response.get('worker_version'), 'dataset_git_revision': splits_response.get('dataset_git_revision'), 'details': splits_response.get('details'), 'updated_at': splits_response.get('updated_at')}) with contextlib.suppress(InvalidName): for first_rows_response in db[firstRowsResponseCollection].find(): if not isinstance(first_rows_response, dict): raise ValueError('first_rows_response should be a dict') db[cachedResponseCollection].insert_one({'_id': first_rows_response.get('_id'), 'kind': FIRST_ROWS_KIND, 'dataset': first_rows_response.get('dataset_name'), 'config': first_rows_response.get('config_name'), 'split': first_rows_response.get('split_name'), 'http_status': first_rows_response.get('http_status'), 'error_code': first_rows_response.get('error_code'), 'content': first_rows_response.get('response'), 'worker_version': first_rows_response.get('worker_version'), 'dataset_git_revision': first_rows_response.get('dataset_git_revision'), 'details': first_rows_response.get('details'), 'updated_at': first_rows_response.get('updated_at')}) def down(self) -> None: logging.info(f'Delete the {cachedResponseCollection} collection') db = get_db(db_name) with contextlib.suppress(InvalidName): db[cachedResponseCollection].drop() def validate(self) -> None: logging.info('Validate the migrated documents') check_documents(DocCls=CachedResponseDocument, sample_size=10) db = get_db(db_name) try: splits_responses_count = db[splitsResponseCollection].count_documents({}) except InvalidName: splits_responses_count = 0 try: first_rows_responses_count = db[firstRowsResponseCollection].count_documents({}) except InvalidName: first_rows_responses_count = 0 cached_responses_count = CachedResponseDocument.objects.count() if splits_responses_count + first_rows_responses_count > cached_responses_count: raise ValueError(f'Some documents are missing in the new collection: splitsResponse ({splits_responses_count}), firstRowsResponse ({first_rows_responses_count}), cachedResponseBlue ({cached_responses_count})') # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230126164900_queue_job_add_priority.py import logging from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS from libcommon.queue.jobs import JobDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddPriorityToJob(Migration): def up(self) -> None: logging.info("If missing, add the priority field with the default value ('normal') to the jobs") db = get_db(QUEUE_MONGOENGINE_ALIAS) db[QUEUE_COLLECTION_JOBS].update_many({'priority': {'$exists': False}}, {'$set': {'priority': 
'normal'}}) def down(self) -> None: logging.info('Remove the priority field from all the jobs') db = get_db(QUEUE_MONGOENGINE_ALIAS) db[QUEUE_COLLECTION_JOBS].update_many({}, {'$unset': {'priority': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of jobs have the 'priority' field set") check_documents(DocCls=JobDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230309123100_cache_add_progress.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddProgressToCacheResponse(Migration): def up(self) -> None: logging.info('If missing, add the progress field with the default value (1.0) to the cached results') db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'progress': {'$exists': False}}, {'$set': {'progress': 1.0}}) def down(self) -> None: logging.info('Remove the progress field from all the cached results') db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({}, {'$unset': {'progress': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'progress' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230309141600_cache_add_job_runner_version.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddJobRunnerVersionToCacheResponse(Migration): def up(self) -> None: logging.info("If missing, add 'job_runner_version' field based on 'worker_version' value") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'job_runner_version': {'$exists': False}}, [{'$set': {'job_runner_version': {'$convert': {'input': {'$first': {'$split': ['$worker_version', '.']}}, 'to': 'int', 'onError': None, 'onNull': None}}}}]) def down(self) -> None: logging.info("Remove 'job_runner_version' field from all the cached results") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({}, {'$unset': {'job_runner_version': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'job_runner_version' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230511100700_queue_delete_indexes_with_force.py import logging from collections.abc import Mapping from typing import Any from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS from mongoengine.connection import get_db from mongodb_migration.migration import IrreversibleMigrationError, Migration field_name = 'force' def get_index_names(index_information: Mapping[str, Any], field_name: str) -> list[str]: return [name for (name, value) in index_information.items() if isinstance(value, dict) and 'key' in value and any((t[0] == field_name for t in value['key'] if isinstance(t, tuple) and len(t)))] class 
MigrationQueueDeleteIndexesWithForce(Migration): def up(self) -> None: logging.info(f'Delete indexes that contain the {field_name} field.') db = get_db(QUEUE_MONGOENGINE_ALIAS) collection = db[QUEUE_COLLECTION_JOBS] index_names = get_index_names(index_information=collection.index_information(), field_name=field_name) for index_name in index_names: collection.drop_index(index_name) def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info('Check that the indexes do not exist anymore') db = get_db(QUEUE_MONGOENGINE_ALIAS) collection = db[QUEUE_COLLECTION_JOBS] index_names = get_index_names(index_information=collection.index_information(), field_name=field_name) if len(index_names) > 0: raise ValueError(f'Found indexes for field {field_name}: {index_names}') # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230516101500_queue_job_add_revision.py import logging from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS from libcommon.queue.jobs import JobDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationQueueAddRevisionToJob(Migration): def up(self) -> None: logging.info("If missing, add the revision field with the value ('main') to the jobs") db = get_db(QUEUE_MONGOENGINE_ALIAS) db[QUEUE_COLLECTION_JOBS].update_many({'revision': {'$exists': False}}, {'$set': {'revision': 'main'}}) def down(self) -> None: logging.info('Remove the revision field from all the jobs') db = get_db(QUEUE_MONGOENGINE_ALIAS) db[QUEUE_COLLECTION_JOBS].update_many({}, {'$unset': {'revision': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of jobs have the 'revision' field set") check_documents(DocCls=JobDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230516101600_queue_delete_index_without_revision.py import logging from collections.abc import Mapping from typing import Any from libcommon.constants import QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS from mongoengine.connection import get_db from mongodb_migration.migration import IrreversibleMigrationError, Migration INDEX_DEFINITION = [('type', 1), ('dataset', 1), ('config', 1), ('split', 1), ('status', 1), ('priority', 1)] def get_index_names(index_information: Mapping[str, Any]) -> list[str]: return [name for (name, value) in index_information.items() if isinstance(value, dict) and 'key' in value and (value['key'] == INDEX_DEFINITION)] class MigrationQueueDeleteIndexWithoutRevision(Migration): def up(self) -> None: logging.info('Delete index.') db = get_db(QUEUE_MONGOENGINE_ALIAS) collection = db[QUEUE_COLLECTION_JOBS] index_names = get_index_names(index_information=collection.index_information()) if len(index_names) != 1: raise ValueError(f'Found {len(index_names)} indexes (should be 1): {index_names}.') collection.drop_index(index_names[0]) def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info('Check that the indexes do not exist anymore') db = get_db(QUEUE_MONGOENGINE_ALIAS) collection = db[QUEUE_COLLECTION_JOBS] index_names = get_index_names(index_information=collection.index_information()) if len(index_names) > 0: raise ValueError(f'Found indexes: {index_names}') # File: 
dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230622131500_lock_add_owner.py import logging from libcommon.constants import QUEUE_COLLECTION_LOCKS, QUEUE_MONGOENGINE_ALIAS from libcommon.queue.lock import Lock from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddOwnerToQueueLock(Migration): def up(self) -> None: logging.info('If missing, add the owner field with the same value as the field job_id to the locks') db = get_db(QUEUE_MONGOENGINE_ALIAS) db[QUEUE_COLLECTION_LOCKS].update_many({'owner': {'$exists': False}}, [{'$set': {'owner': '$job_id'}}]) def down(self) -> None: logging.info('Remove the owner field from all the locks') db = get_db(QUEUE_MONGOENGINE_ALIAS) db[QUEUE_COLLECTION_LOCKS].update_many({}, {'$unset': {'owner': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of locks have the 'owner' field") check_documents(DocCls=Lock, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230703110100_cache_add_partial_field_in_config_parquet_and_info.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddPartialToCacheResponse(Migration): def up(self) -> None: logging.info('If missing, add the partial field with the default value (false) to the cached results of config-parquet-and-info and subsequent steps') db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': {'$in': ['config-parquet-and-info', 'config-parquet', 'dataset-parquet', 'config-parquet-metadata', 'config-info', 'dataset-info', 'config-size', 'dataset-size']}, 'http_status': 200, 'content.partial': {'$exists': False}}, {'$set': {'content.partial': False}}) def down(self) -> None: logging.info('Remove the partial field from all the cached results') db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': {'$in': ['config-parquet-and-info', 'config-parquet', 'dataset-parquet', 'config-parquet-metadata', 'config-info', 'dataset-info', 'config-size', 'dataset-size']}, 'http_status': 200}, {'$unset': {'content.partial': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'partial' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230705160600_queue_job_add_difficulty.py import logging from libcommon.constants import DEFAULT_DIFFICULTY, QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS from libcommon.processing_graph import specification from libcommon.queue.jobs import JobDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationQueueAddDifficultyToJob(Migration): def up(self) -> None: logging.info('If missing, add the difficulty with a value that depends on the job type, else 50') db = get_db(QUEUE_MONGOENGINE_ALIAS) for (job_type, spec) in specification.items(): difficulty = spec.get('difficulty', DEFAULT_DIFFICULTY) db[QUEUE_COLLECTION_JOBS].update_many({'type': job_type, 'difficulty': {'$exists': False}}, {'$set': 
{'difficulty': difficulty}}) db[QUEUE_COLLECTION_JOBS].update_many({'difficulty': {'$exists': False}}, {'$set': {'difficulty': DEFAULT_DIFFICULTY}}) def down(self) -> None: logging.info('Remove the difficulty field from all the jobs') db = get_db(QUEUE_MONGOENGINE_ALIAS) db[QUEUE_COLLECTION_JOBS].update_many({}, {'$unset': {'difficulty': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of jobs have the 'difficulty' field set") check_documents(DocCls=JobDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230824154900_cache_add_features_field_in_split_duckdb_index.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddFeaturesToSplitDuckdbIndexCacheResponse(Migration): def up(self) -> None: logging.info('If missing, add the features field with the default value (None) to the cached results of split-duckdb-index') db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-duckdb-index', 'http_status': 200, 'content.features': {'$exists': False}}, {'$set': {'content.features': None}}) def down(self) -> None: logging.info('Remove the features field from all the cached results') db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-duckdb-index', 'http_status': 200}, {'$unset': {'content.features': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'features' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230825170200_lock_add_ttl.py import logging from libcommon.constants import QUEUE_COLLECTION_LOCKS, QUEUE_MONGOENGINE_ALIAS from libcommon.queue.lock import Lock from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddTtlToQueueLock(Migration): def up(self) -> None: logging.info('If missing, add the ttl field to the locks') db = get_db(QUEUE_MONGOENGINE_ALIAS) db[QUEUE_COLLECTION_LOCKS].update_many({'ttl': {'$exists': False}}, [{'$set': {'ttl': None}}]) def down(self) -> None: logging.info('Remove the ttl field from all the locks') db = get_db(QUEUE_MONGOENGINE_ALIAS) db[QUEUE_COLLECTION_LOCKS].update_many({}, {'$unset': {'ttl': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of locks have the 'ttl' field") check_documents(DocCls=Lock, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20230926095900_cache_add_has_fts_field_in_split_duckdb_index.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddHasFTSToSplitDuckdbIndexCacheResponse(Migration): def up(self) -> None: logging.info("If missing, add the 'has_fts' field with the default value (True) to the cached results of split-duckdb-index") db = get_db(CACHE_MONGOENGINE_ALIAS) 
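# Note: the update below follows the backfill pattern used throughout these cache migrations:
# match the cache kind and a successful http_status, keep only documents where the new field is
# missing ('$exists': False), and '$set' a default value so previously computed responses conform
# to the new schema without being recomputed; down() reverses the change with an '$unset'.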
db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-duckdb-index', 'http_status': 200, 'content.has_fts': {'$exists': False}}, {'$set': {'content.has_fts': True}}) def down(self) -> None: logging.info("Remove the 'has_fts' field from all the cached results") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-duckdb-index', 'http_status': 200}, {'$unset': {'content.has_fts': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'has_fts' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20231106193200_cache_add_partial_field_in_split_duckdb_index.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddPartialToSplitDuckdbIndexCacheResponse(Migration): def up(self) -> None: logging.info("If missing, add the 'partial', 'num_rows' and 'num_bytes' fields with the default value (None, None, None) to the cached results of split-duckdb-index") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-duckdb-index', 'http_status': 200, 'content.partial': {'$exists': False}}, {'$set': {'content.partial': None, 'content.num_rows': None, 'content.num_bytes': None}}) def down(self) -> None: logging.info("Remove the 'partial', 'num_rows' and 'num_bytes' fields from all the cached results") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-duckdb-index', 'http_status': 200}, {'$unset': {'content.partial': '', 'content.num_rows': '', 'content.num_bytes': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'partial', 'num_rows' and 'num_bytes' fields") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240104085000_cache_add_retries.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddRetriesToCacheResponse(Migration): def up(self) -> None: logging.info("If missing, add the 'retries' field with the default value (0) to the cached results") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'retries': {'$exists': False}}, {'$set': {'retries': 0}}) def down(self) -> None: logging.info("Remove the 'retries' field from all the cached results") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({}, {'$unset': {'retries': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'retries' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240109160700_cache_add_failed_runs.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from 
mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddFailedRunsToCacheResponse(Migration): def up(self) -> None: db = get_db(CACHE_MONGOENGINE_ALIAS) logging.info("If missing, add the 'failed_runs' field with the default value (0) to the success cached results") db[CACHE_COLLECTION_RESPONSES].update_many({'http_status': 200, 'failed_runs': {'$exists': False}}, {'$set': {'failed_runs': 0}}) logging.info("If missing, add the 'failed_runs' field with a default value (1) to the failed cached results") db[CACHE_COLLECTION_RESPONSES].update_many({'http_status': {'$ne': 200}, 'failed_runs': {'$exists': False}}, {'$set': {'failed_runs': 1}}) def down(self) -> None: logging.info("Remove the 'failed_runs' field from all the cached results") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({}, {'$unset': {'failed_runs': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'failed_runs' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240112164500_cache_add_partial_field_in_split_descriptive_statistics.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.parquet_utils import parquet_export_is_partial from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddPartialToSplitDescriptiveStatisticsCacheResponse(Migration): def up(self) -> None: logging.info("If missing, add the 'partial' field with the default value None to the cached results of split-descriptive-statistics job runner") db = get_db(CACHE_MONGOENGINE_ALIAS) partial_configs_entries = db[CACHE_COLLECTION_RESPONSES].find({'kind': 'config-parquet', 'content.partial': True}) partial_splits = {(entry['dataset'], entry['config'], file['split']) for entry in partial_configs_entries for file in entry['content']['parquet_files'] if parquet_export_is_partial(file['url'])} stats_successful_entries = db[CACHE_COLLECTION_RESPONSES].find({'kind': 'split-descriptive-statistics', 'http_status': 200, 'content.partial': {'$exists': False}}) partial_stats_successful_ids = [entry['_id'] for entry in stats_successful_entries if (entry['dataset'], entry['config'], entry['split']) in partial_splits] db[CACHE_COLLECTION_RESPONSES].update_many({'_id': {'$nin': partial_stats_successful_ids}, 'kind': 'split-descriptive-statistics', 'http_status': 200, 'content.partial': {'$exists': False}}, {'$set': {'content.partial': False}}) db[CACHE_COLLECTION_RESPONSES].update_many({'_id': {'$in': partial_stats_successful_ids}, 'kind': 'split-descriptive-statistics', 'http_status': 200, 'content.partial': {'$exists': False}}, {'$set': {'content.partial': True}}) def down(self) -> None: logging.info("Remove the 'partial' field from all the cached results") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-descriptive-statistics'}, {'$unset': {'content.partial': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'partial' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: 
dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240206153000_cache_add_tags_in_hub_cache.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddTagsToHubCacheCacheResponse(Migration): def up(self) -> None: db = get_db(CACHE_MONGOENGINE_ALIAS) logging.info("If missing, add the 'tags' field with the default value ([]) to the success cached results of dataset-hub-cache") db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'dataset-hub-cache', 'http_status': 200, 'content.tags': {'$exists': False}}, {'$set': {'content.tags': []}}) def down(self) -> None: logging.info("Remove the 'tags' field from all the cached results of dataset-hub-cache") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'dataset-hub-cache'}, {'$unset': {'content.tags': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results of dataset-hub-cache have the 'tags' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240221103200_cache_merge_config_split_names.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from mongoengine.connection import get_db from mongodb_migration.migration import IrreversibleMigrationError, Migration STREAMING = 'config-split-names-from-streaming' INFO = 'config-split-names-from-info' MERGED = 'config-split-names' JOB_RUNNER_VERSION = 3 class MigrationMergeConfigSplitNamesResponses(Migration): def up(self) -> None: db = get_db(CACHE_MONGOENGINE_ALIAS) logging.info("Remove all the entries with 'error_code=ResponseAlreadyComputedError' for 'config-split-names-from-streaming'") db[CACHE_COLLECTION_RESPONSES].delete_many({'kind': {'$in': [STREAMING, INFO]}, 'error_code': 'ResponseAlreadyComputedError'}) logging.info("Update or delete all the 'config-split-names-from-info' responses") for info_entry in db[CACHE_COLLECTION_RESPONSES].find({'kind': INFO}): streaming_entry = db[CACHE_COLLECTION_RESPONSES].find_one({'kind': STREAMING, 'dataset': info_entry['dataset'], 'config': info_entry['config']}) if streaming_entry is None: db[CACHE_COLLECTION_RESPONSES].update_one({'_id': info_entry['_id']}, {'$set': {'kind': MERGED, 'job_runner_version': JOB_RUNNER_VERSION}}) elif info_entry['http_status'] == 200: db[CACHE_COLLECTION_RESPONSES].update_one({'_id': info_entry['_id']}, {'$set': {'kind': MERGED, 'job_runner_version': JOB_RUNNER_VERSION}}) db[CACHE_COLLECTION_RESPONSES].delete_one({'_id': streaming_entry['_id']}) else: db[CACHE_COLLECTION_RESPONSES].update_one({'_id': streaming_entry['_id']}, {'$set': {'kind': MERGED, 'job_runner_version': JOB_RUNNER_VERSION}}) db[CACHE_COLLECTION_RESPONSES].delete_one({'_id': info_entry['_id']}) logging.info("Update the remaning 'config-split-names-from-streaming' entries to 'config-split-names'") db[CACHE_COLLECTION_RESPONSES].update_many({'kind': STREAMING}, {'$set': {'kind': MERGED, 'job_runner_version': JOB_RUNNER_VERSION}}) def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info("Ensure that no 'config-split-names-from-streaming' and 
'config-split-names-from-info' entries exist") db = get_db(CACHE_MONGOENGINE_ALIAS) if db[CACHE_COLLECTION_RESPONSES].count_documents({'kind': {'$in': [STREAMING, INFO]}}) > 0: raise ValueError("Some 'config-split-names-from-streaming' and 'config-split-names-from-info' entries still exist") logging.info("Check 'config-split-names' responses exist") if db[CACHE_COLLECTION_RESPONSES].count_documents({'kind': MERGED}) == 0: raise ValueError("No 'config-split-names' entries exist") # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240221160700_cache_merge_split_first_rows.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from mongoengine.connection import get_db from mongodb_migration.migration import IrreversibleMigrationError, Migration STREAMING = 'split-first-rows-from-streaming' PARQUET = 'split-first-rows-from-parquet' MERGED = 'split-first-rows' JOB_RUNNER_VERSION = 4 class MigrationMergeSplitFirstRowsResponses(Migration): def up(self) -> None: db = get_db(CACHE_MONGOENGINE_ALIAS) logging.info("Remove all the entries with 'error_code=ResponseAlreadyComputedError' for 'split-first-rows-from-streaming'") db[CACHE_COLLECTION_RESPONSES].delete_many({'kind': {'$in': [STREAMING, PARQUET]}, 'error_code': 'ResponseAlreadyComputedError'}) logging.info("Update or delete all the 'split-first-rows-from-parquet' responses") for parquet_entry in db[CACHE_COLLECTION_RESPONSES].find({'kind': PARQUET}): streaming_entry = db[CACHE_COLLECTION_RESPONSES].find_one({'kind': STREAMING, 'dataset': parquet_entry['dataset'], 'config': parquet_entry['config'], 'split': parquet_entry['split']}) if streaming_entry is None: db[CACHE_COLLECTION_RESPONSES].update_one({'_id': parquet_entry['_id']}, {'$set': {'kind': MERGED, 'job_runner_version': JOB_RUNNER_VERSION}}) elif parquet_entry['http_status'] == 200: db[CACHE_COLLECTION_RESPONSES].update_one({'_id': parquet_entry['_id']}, {'$set': {'kind': MERGED, 'job_runner_version': JOB_RUNNER_VERSION}}) db[CACHE_COLLECTION_RESPONSES].delete_one({'_id': streaming_entry['_id']}) else: db[CACHE_COLLECTION_RESPONSES].update_one({'_id': streaming_entry['_id']}, {'$set': {'kind': MERGED, 'job_runner_version': JOB_RUNNER_VERSION}}) db[CACHE_COLLECTION_RESPONSES].delete_one({'_id': parquet_entry['_id']}) logging.info("Update the remaning 'split-first-rows-from-streaming' entries to 'split-first-rows'") db[CACHE_COLLECTION_RESPONSES].update_many({'kind': STREAMING}, {'$set': {'kind': MERGED, 'job_runner_version': JOB_RUNNER_VERSION}}) def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info("Ensure that no 'split-first-rows-from-streaming' and 'split-first-rows-from-parquet' entries exist") db = get_db(CACHE_MONGOENGINE_ALIAS) if db[CACHE_COLLECTION_RESPONSES].count_documents({'kind': {'$in': [STREAMING, PARQUET]}}) > 0: raise ValueError("Some 'split-first-rows-from-streaming' and 'split-first-rows-from-parquet' entries still exist") logging.info("Check 'split-first-rows' responses exist") if db[CACHE_COLLECTION_RESPONSES].count_documents({'kind': MERGED}) == 0: raise ValueError("No 'split-first-rows' entries exist") # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240221160800_cache_set_updated_at_to_root_step.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from mongoengine.connection import get_db from 
mongodb_migration.migration import IrreversibleMigrationError, Migration ROOT_STEP = 'dataset-config-names' class MigrationSetUpdatedAtToOldestStep(Migration): def up(self) -> None: db = get_db(CACHE_MONGOENGINE_ALIAS) logging.info('Setting the updated_at value for all the steps to the one of the root step, for each dataset') for root_entry in db[CACHE_COLLECTION_RESPONSES].find({'kind': ROOT_STEP}): db[CACHE_COLLECTION_RESPONSES].update_many({'dataset': root_entry['dataset']}, {'$set': {'updated_at': root_entry['updated_at']}}) def down(self) -> None: raise IrreversibleMigrationError('This migration does not support rollback') def validate(self) -> None: logging.info('No need to validate.') # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240619124500_cache_add_estimated_dataset_info_field_parquet_and_info.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddEstimatedDatasetInfoToParquetAndInfoCacheResponse(Migration): def up(self) -> None: logging.info("If missing, add the 'estimated_dataset_info' field with the default value None to the cached results of config-parquet-and-info") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'config-parquet-and-info', 'http_status': 200, 'content.estimated_dataset_info': {'$exists': False}}, {'$set': {'content.estimated_dataset_info': None}}) def down(self) -> None: logging.info("Remove the 'estimated_dataset_info' field from all the cached results") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'config-parquet-and-info', 'http_status': 200}, {'$unset': {'content.estimated_dataset_info': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'estimated_dataset_info' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240624144000_cache_add_estimated_num_rows_field_in_size.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddEstimatedNumRowsToSizeCacheResponse(Migration): def up(self) -> None: logging.info("If missing, add the 'estimated_num_rows' field with the default value None to the cached results of dataset-size and config-size") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'config-size', 'http_status': 200, 'content.size.config.estimated_num_rows': {'$exists': False}}, {'$set': {'content.size.config.estimated_num_rows': None, 'content.size.splits.$[].estimated_num_rows': None}}) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'dataset-size', 'http_status': 200, 'content.size.dataset.estimated_num_rows': {'$exists': False}}, {'$set': {'content.size.dataset.estimated_num_rows': None, 'content.size.configs.$[].estimated_num_rows': None, 'content.size.splits.$[].estimated_num_rows': None}}) def down(self) -> None: logging.info("Remove the 'estimated_num_rows' fields from the cached results of config-size and dataset-size") db = 
get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'config-size', 'http_status': 200}, {'$unset': {'content.size.config.estimated_num_rows': '', 'content.size.splits.$[].estimated_num_rows': ''}}) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'dataset-size', 'http_status': 200}, {'$unset': {'content.size.dataset.estimated_num_rows': '', 'content.size.configs.$[].estimated_num_rows': '', 'content.size.splits.$[].estimated_num_rows': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'estimated_num_rows' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240626095000_cache_add_stemmer_in_split_duckdb_index.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddStemmerToSplitDuckdbIndexCacheResponse(Migration): def up(self) -> None: logging.info("If missing, add the 'stemmer' to the cached results of split-duckdb-index") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-duckdb-index', 'http_status': 200, 'content.has_fts': True, 'content.stemmer': {'$exists': False}}, {'$set': {'content.stemmer': 'porter'}}) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-duckdb-index', 'http_status': 200, 'content.has_fts': False, 'content.stemmer': {'$exists': False}}, {'$set': {'content.stemmer': None}}) def down(self) -> None: logging.info("Remove the 'stemmer' field from all the cached results") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-duckdb-index', 'http_status': 200}, {'$unset': {'content.stemmer': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'stemmer' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240626151600_cache_remove_has_fts_field_in_split_duckdb_index.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationRemoveHasFTSFromSplitDuckdbIndexCacheResponse(Migration): def up(self) -> None: logging.info("Remove 'has_fts' field from cached results of split-duckdb-index") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-duckdb-index', 'http_status': 200, 'content.stemmer': {'$exists': True}}, {'$unset': {'content.has_fts': ''}}) def down(self) -> None: logging.info("Rollback 'has_fts' field for all the cached results") db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-duckdb-index', 'http_status': 200, 'content.stemmer': None}, {'$set': {'content.has_fts': False}}) db[CACHE_COLLECTION_RESPONSES].update_many({'kind': 'split-duckdb-index', 'http_status': 200, 'content.stemmer': {'$ne': None}}, {'$set': {'content.has_fts': True}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached 
results have the 'stemmer' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240703160300_cache_add_duration.py import logging from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_MONGOENGINE_ALIAS from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddDurationToCacheResponse(Migration): def up(self) -> None: logging.info('If missing, add the duration field with the default value None to the cached results') db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({'duration': {'$exists': False}}, {'$set': {'duration': None}}) def down(self) -> None: logging.info('Remove the duration field from all the cached results') db = get_db(CACHE_MONGOENGINE_ALIAS) db[CACHE_COLLECTION_RESPONSES].update_many({}, {'$unset': {'duration': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of cached results have the 'duration' field") check_documents(DocCls=CachedResponseDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/migrations/_20240731143600_queue_add_dataset_status_to_queue_metrics.py import logging from libcommon.constants import QUEUE_MONGOENGINE_ALIAS, TYPE_STATUS_AND_DATASET_STATUS_JOB_COUNTS_COLLECTION from libcommon.queue.dataset_blockages import DATASET_STATUS_NORMAL from libcommon.queue.metrics import JobTotalMetricDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import Migration class MigrationAddDatasetStatusToQueueMetrics(Migration): def up(self) -> None: logging.info("If missing, add the 'dataset_status' field with the default value 'normal' to the jobs metrics") db = get_db(QUEUE_MONGOENGINE_ALIAS) db[TYPE_STATUS_AND_DATASET_STATUS_JOB_COUNTS_COLLECTION].update_many({'dataset_status': {'$exists': False}}, {'$set': {'dataset_status': DATASET_STATUS_NORMAL}}) def down(self) -> None: logging.info("Remove the 'dataset_status' field from all the jobs metrics") db = get_db(QUEUE_MONGOENGINE_ALIAS) db[TYPE_STATUS_AND_DATASET_STATUS_JOB_COUNTS_COLLECTION].update_many({}, {'$unset': {'dataset_status': ''}}) def validate(self) -> None: logging.info("Ensure that a random selection of jobs metrics have the 'dataset_status' field") check_documents(DocCls=JobTotalMetricDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/plan.py import logging from mongodb_migration.database_migrations import DatabaseMigration from mongodb_migration.migration import Migration class SavedMigrationsError(Exception): pass class Plan: collected_migrations: list[Migration] executed_migrations: list[Migration] def __init__(self, collected_migrations: list[Migration]): self.collected_migrations = collected_migrations self.executed_migrations = [] def get_saved_migrations_versions(self) -> list[str]: return DatabaseMigration.objects().distinct('version') def get_planned_migrations(self) -> list[Migration]: saved_migrations_versions = sorted(self.get_saved_migrations_versions()) collected_migrations = sorted(self.collected_migrations, key=lambda m: m.version) first_collected_migrations_versions = [migration.version for migration in collected_migrations[:len(saved_migrations_versions)]] if saved_migrations_versions 
!= first_collected_migrations_versions: logging.error(f'Database migrations are not in sync with collected migrations. Database: {saved_migrations_versions}, Collected: {first_collected_migrations_versions}') raise SavedMigrationsError('The saved migrations in the database should be the first collected migrations.') num_saved_migrations = len(saved_migrations_versions) num_collected_migrations = len(collected_migrations) if not num_collected_migrations: logging.error('No collected migrations') if num_saved_migrations: logging.info(f'{num_saved_migrations} migrations have already been applied. They will be skipped.') if num_saved_migrations == len(collected_migrations): logging.info('All migrations have already been applied.') return collected_migrations[num_saved_migrations:] def execute(self) -> None: try: self.apply() except Exception as e: logging.error(f'Migration failed: {e}') self.rollback() raise e def apply(self) -> None: logging.info('Start migrations') self.executed_migrations = [] for migration in self.get_planned_migrations(): self.executed_migrations.append(migration) logging.info(f'Migrate {migration.version}: add to the migrations collection') self.save(migration) logging.info(f'Migrate {migration.version}: apply') migration.up() logging.info(f'Migrate {migration.version}: validate') migration.validate() logging.info(f'Migrate {migration.version}: done') logging.info('All migrations have been applied') def rollback(self) -> None: logging.info('Start rollback') try: while self.executed_migrations: migration = self.executed_migrations[-1] logging.info(f'Rollback {migration.version}: roll back') migration.down() logging.info(f'Rollback {migration.version}: removed from the migrations collection') self.remove(migration) logging.info(f'Rollback {migration.version}: done') self.executed_migrations.pop() logging.info('All executed migrations have been rolled back') except Exception as e: logging.error(f'Rollback failed: {e}. The database is in an inconsistent state. 
Try to restore the backup manually.') raise e def save(self, migration: Migration) -> None: DatabaseMigration(version=migration.version, description=migration.description).save() def remove(self, migration: Migration) -> None: DatabaseMigration.objects(version=migration.version).delete() # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/renaming_migrations.py import logging from typing import Optional from libcommon.queue.jobs import JobDocument from libcommon.simple_cache import CachedResponseDocument from mongoengine.connection import get_db from mongodb_migration.check import check_documents from mongodb_migration.migration import CacheMigration, QueueMigration class CacheRenamingMigration(CacheMigration): def __init__(self, cache_kind: str, new_cache_kind: str, version: str, description: Optional[str]=None): self.new_cache_kind: str = new_cache_kind if not description: description = f"update 'kind' field in cache from '{cache_kind}' to '{new_cache_kind}'" super().__init__(cache_kind=cache_kind, version=version, description=description) def up(self) -> None: logging.info(f"Rename cache_kind field from '{self.cache_kind}' to '{self.new_cache_kind}'") db = get_db(self.MONGOENGINE_ALIAS) result = db[self.COLLECTION_RESPONSES].update_many({'kind': self.cache_kind}, {'$set': {'kind': self.new_cache_kind}}) logging.info(f'{result.matched_count} cache entries to be renamed - {result.modified_count} cache entries renamed') def down(self) -> None: logging.info(f"Rollback cache_kind field from '{self.new_cache_kind}' to '{self.cache_kind}'") db = get_db(self.MONGOENGINE_ALIAS) result = db[self.COLLECTION_RESPONSES].update_many({'kind': self.new_cache_kind}, {'$set': {'kind': self.cache_kind}}) logging.info(f'{result.matched_count} cache entries to be renamed - {result.modified_count} cache entries renamed') def validate(self) -> None: logging.info('Validate modified documents') check_documents(DocCls=CachedResponseDocument, sample_size=10) class QueueRenamingMigration(QueueMigration): def __init__(self, job_type: str, new_job_type: str, version: str, description: Optional[str]=None): self.new_job_type: str = new_job_type if not description: description = f"update 'type' and 'unicity_id' fields in job from '{job_type}' to '{new_job_type}'" super().__init__(job_type=job_type, version=version, description=description) def up(self) -> None: logging.info(f"Rename unicity_id field from '{self.job_type}' to '{self.new_job_type}' and change type from '{self.job_type}' to '{self.new_job_type}'") db = get_db(self.MONGOENGINE_ALIAS) result = db[self.COLLECTION_JOBS].update_many({'type': self.job_type}, [{'$set': {'unicity_id': {'$replaceOne': {'input': '$unicity_id', 'find': f'{self.job_type}', 'replacement': f'{self.new_job_type}'}}, 'type': self.new_job_type}}]) logging.info(f'{result.matched_count} jobs to be renamed - {result.modified_count} jobs renamed') def down(self) -> None: logging.info(f"Rename unicity_id field from '{self.new_job_type}' to '{self.job_type}' and change type from '{self.new_job_type}' to '{self.job_type}'") db = get_db(self.MONGOENGINE_ALIAS) result = db[self.COLLECTION_JOBS].update_many({'type': self.new_job_type}, [{'$set': {'unicity_id': {'$replaceOne': {'input': '$unicity_id', 'find': f'{self.new_job_type}', 'replacement': f'{self.job_type}'}}, 'type': self.job_type}}]) logging.info(f'{result.matched_count} jobs to be renamed - {result.modified_count} jobs renamed') def validate(self) -> None: logging.info('Validate modified documents') 
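# check_documents (from mongodb_migration.check) re-validates a random sample of documents of the
# given Document class (here 10 JobDocument records), so a rename that left documents inconsistent
# with the mongoengine schema surfaces during the migration run rather than at query time.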
check_documents(DocCls=JobDocument, sample_size=10) # File: dataset-viewer-main/jobs/mongodb_migration/src/mongodb_migration/resources.py from dataclasses import dataclass, field from libcommon.resources import MongoResource from mongodb_migration.constants import DATABASE_MIGRATIONS_MONGOENGINE_ALIAS @dataclass class MigrationsMongoResource(MongoResource): mongoengine_alias: str = field(default=DATABASE_MIGRATIONS_MONGOENGINE_ALIAS, init=False) # File: dataset-viewer-main/libs/libapi/src/libapi/authentication.py import logging from collections.abc import Generator from typing import Literal, Optional import httpx from libcommon.prometheus import StepProfiler from starlette.requests import Request from libapi.exceptions import AuthCheckHubRequestError, ExternalAuthenticatedError, ExternalUnauthenticatedError, RenamedDatasetError from libapi.jwt_token import validate_jwt class RequestAuth(httpx.Auth): def __init__(self, request: Optional[Request]) -> None: self.authorization = request.headers.get('authorization') if request else None def auth_flow(self, request: httpx.Request) -> Generator[httpx.Request, httpx.Response, None]: if self.authorization: request.headers['authorization'] = self.authorization yield request def get_jwt_token(request: Optional[Request]=None) -> Optional[str]: if not request: return None if (token := request.headers.get('x-api-key')): return token authorization = request.headers.get('authorization') if not authorization: return None token = authorization.removeprefix('Bearer jwt:') return None if token == authorization else token async def auth_check(dataset: str, external_auth_url: Optional[str]=None, request: Optional[Request]=None, hf_jwt_public_keys: Optional[list[str]]=None, hf_jwt_algorithm: Optional[str]=None, hf_timeout_seconds: Optional[float]=None) -> Literal[True]: with StepProfiler(method='auth_check', step='all'): with StepProfiler(method='auth_check', step='check JWT'): if (jwt_token := get_jwt_token(request)) and hf_jwt_public_keys and hf_jwt_algorithm: validate_jwt(dataset=dataset, token=jwt_token, public_keys=hf_jwt_public_keys, algorithm=hf_jwt_algorithm) logging.debug(f'By-passing the authentication step, because a valid JWT was passed in headers for dataset {dataset}. JWT was: {jwt_token}') return True with StepProfiler(method='auth_check', step='prepare parameters'): if external_auth_url is None: return True try: url = external_auth_url % dataset except TypeError as e: raise ValueError('external_auth_url must contain %s') from e with StepProfiler(method='auth_check', step='create auth parameter'): auth = RequestAuth(request) with StepProfiler(method='auth_check', step='requests.get'): try: logging.debug(f'Checking authentication on the Hugging Face Hub for dataset {dataset}, url: {url}, timeout: {hf_timeout_seconds}, authorization: {auth.authorization}') async with httpx.AsyncClient() as client: response = await client.get(url, auth=auth, timeout=hf_timeout_seconds) except Exception as err: raise AuthCheckHubRequestError("Authentication check on the Hugging Face Hub failed or timed out. Please try again later, it's a temporary internal issue.", err) from err with StepProfiler(method='auth_check', step='return or raise'): if response.status_code == 200: return True elif response.status_code == 307: raise RenamedDatasetError('The dataset has been renamed. 
Please use the current dataset name.') elif response.status_code == 401: raise ExternalUnauthenticatedError('The dataset does not exist, or is not accessible without authentication (private or gated). Please check the spelling of the dataset name or retry with authentication.') elif response.status_code in {403, 404}: raise ExternalAuthenticatedError('The dataset does not exist, or is not accessible with the current credentials (private or gated). Please check the spelling of the dataset name or retry with other authentication credentials.') else: raise ValueError(f'Unexpected status code {response.status_code}') # File: dataset-viewer-main/libs/libapi/src/libapi/config.py from dataclasses import dataclass, field from typing import Optional from environs import Env API_UVICORN_HOSTNAME = 'localhost' API_UVICORN_NUM_WORKERS = 2 API_UVICORN_PORT = 8000 @dataclass(frozen=True) class UvicornConfig: hostname: str = API_UVICORN_HOSTNAME num_workers: int = API_UVICORN_NUM_WORKERS port: int = API_UVICORN_PORT @classmethod def from_env(cls) -> 'UvicornConfig': env = Env(expand_vars=True) with env.prefixed('API_UVICORN_'): return cls(hostname=env.str(name='HOSTNAME', default=API_UVICORN_HOSTNAME), num_workers=env.int(name='NUM_WORKERS', default=API_UVICORN_NUM_WORKERS), port=env.int(name='PORT', default=API_UVICORN_PORT)) API_EXTERNAL_AUTH_URL = None API_HF_AUTH_PATH = '/api/datasets/%s/auth-check' API_HF_JWT_PUBLIC_KEY_URL = None API_HF_JWT_ADDITIONAL_PUBLIC_KEYS: list[str] = [] API_HF_JWT_ALGORITHM = 'EdDSA' API_HF_TIMEOUT_SECONDS = 0.2 API_HF_WEBHOOK_SECRET = None API_MAX_AGE_LONG = 120 API_MAX_AGE_SHORT = 10 @dataclass(frozen=True) class ApiConfig: external_auth_url: Optional[str] = API_EXTERNAL_AUTH_URL hf_auth_path: str = API_HF_AUTH_PATH hf_jwt_public_key_url: Optional[str] = API_HF_JWT_PUBLIC_KEY_URL hf_jwt_additional_public_keys: list[str] = field(default_factory=API_HF_JWT_ADDITIONAL_PUBLIC_KEYS.copy) hf_jwt_algorithm: Optional[str] = API_HF_JWT_ALGORITHM hf_timeout_seconds: Optional[float] = API_HF_TIMEOUT_SECONDS hf_webhook_secret: Optional[str] = API_HF_WEBHOOK_SECRET max_age_long: int = API_MAX_AGE_LONG max_age_short: int = API_MAX_AGE_SHORT @classmethod def from_env(cls, hf_endpoint: str) -> 'ApiConfig': env = Env(expand_vars=True) with env.prefixed('API_'): hf_auth_path = env.str(name='HF_AUTH_PATH', default=API_HF_AUTH_PATH) external_auth_url = None if hf_auth_path is None else f'{hf_endpoint}{hf_auth_path}' return cls(external_auth_url=external_auth_url, hf_auth_path=hf_auth_path, hf_jwt_public_key_url=env.str(name='HF_JWT_PUBLIC_KEY_URL', default=API_HF_JWT_PUBLIC_KEY_URL), hf_jwt_additional_public_keys=env.list(name='HF_JWT_ADDITIONAL_PUBLIC_KEYS', default=API_HF_JWT_ADDITIONAL_PUBLIC_KEYS.copy()), hf_jwt_algorithm=env.str(name='HF_JWT_ALGORITHM', default=API_HF_JWT_ALGORITHM), hf_timeout_seconds=env.float(name='HF_TIMEOUT_SECONDS', default=API_HF_TIMEOUT_SECONDS), hf_webhook_secret=env.str(name='HF_WEBHOOK_SECRET', default=API_HF_WEBHOOK_SECRET), max_age_long=env.int(name='MAX_AGE_LONG', default=API_MAX_AGE_LONG), max_age_short=env.int(name='MAX_AGE_SHORT', default=API_MAX_AGE_SHORT)) # File: dataset-viewer-main/libs/libapi/src/libapi/duckdb.py import errno import json import logging import os import re from hashlib import sha1 from typing import Optional import anyio from anyio import Path from libcommon.constants import SPLIT_DUCKDB_INDEX_KIND from libcommon.parquet_utils import extract_split_directory_from_parquet_url from libcommon.prometheus import StepProfiler from 
libcommon.simple_cache import CacheEntry from libcommon.storage import StrPath, init_dir from libcommon.storage_client import StorageClient from libcommon.utils import download_file_from_hub from libapi.exceptions import DownloadIndexError from libapi.utils import get_cache_entry_from_step REPO_TYPE = 'dataset' DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY = 'downloads' HUB_DOWNLOAD_CACHE_FOLDER = 'cache' async def get_index_file_location_and_download_if_missing(duckdb_index_file_directory: StrPath, dataset: str, revision: str, config: str, split: str, filename: str, size_bytes: int, url: str, target_revision: str, hf_token: Optional[str]) -> str: with StepProfiler(method='get_index_file_location_and_download_if_missing', step='all'): index_folder = get_download_folder(duckdb_index_file_directory, size_bytes, dataset, config, split, revision) split_directory = extract_split_directory_from_parquet_url(url) repo_file_location = f'{config}/{split_directory}/{filename}' index_file_location = f'{index_folder}/{repo_file_location}' index_path = Path(index_file_location) if not await index_path.is_file(): with StepProfiler(method='get_index_file_location_and_download_if_missing', step='download index file'): cache_folder = f'{duckdb_index_file_directory}/{HUB_DOWNLOAD_CACHE_FOLDER}' await anyio.to_thread.run_sync(download_index_file, cache_folder, index_folder, target_revision, dataset, repo_file_location, hf_token) await index_path.touch() return index_file_location def get_download_folder(root_directory: StrPath, size_bytes: int, dataset: str, config: str, split: str, revision: str) -> str: check_available_disk_space(root_directory, size_bytes) payload = (dataset, config, split, revision) hash_suffix = sha1(json.dumps(payload, sort_keys=True).encode(), usedforsecurity=False).hexdigest()[:8] subdirectory = ''.join([c if re.match('[\\w-]', c) else '-' for c in f'{dataset}-{hash_suffix}']) return f'{root_directory}/{DUCKDB_INDEX_DOWNLOADS_SUBDIRECTORY}/{subdirectory}' def check_available_disk_space(path: StrPath, required_space: int) -> None: try: disk_stat = os.statvfs(path) except FileNotFoundError: init_dir(path) disk_stat = os.statvfs(path) free_space = disk_stat.f_bavail * disk_stat.f_frsize logging.debug(f'{free_space} available space, needed {required_space}') if free_space < required_space: raise DownloadIndexError('Cannot perform the search due to a lack of disk space on the server. Please report the issue.') def download_index_file(cache_folder: str, index_folder: str, target_revision: str, dataset: str, repo_file_location: str, hf_token: Optional[str]=None) -> None: logging.info(f'init_dir {index_folder}') try: init_dir(index_folder) download_file_from_hub(repo_type=REPO_TYPE, revision=target_revision, repo_id=dataset, filename=repo_file_location, local_dir=index_folder, hf_token=hf_token, cache_dir=cache_folder) except OSError as err: if err.errno == errno.ENOSPC: raise DownloadIndexError('Cannot perform the operation due to a lack of disk space on the server. 
Please report the issue.', err) def get_cache_entry_from_duckdb_index_job(dataset: str, config: str, split: str, hf_endpoint: str, hf_token: Optional[str], hf_timeout_seconds: Optional[float], blocked_datasets: list[str], storage_clients: Optional[list[StorageClient]]=None) -> CacheEntry: return get_cache_entry_from_step(processing_step_name=SPLIT_DUCKDB_INDEX_KIND, dataset=dataset, config=config, split=split, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds, blocked_datasets=blocked_datasets, storage_clients=storage_clients) # File: dataset-viewer-main/libs/libapi/src/libapi/exceptions.py import logging from http import HTTPStatus from typing import Literal, Optional from libcommon.exceptions import CustomError ApiErrorCode = Literal['AuthCheckHubRequestError', 'DownloadIndexError', 'ExternalAuthenticatedError', 'ExternalUnauthenticatedError', 'InvalidParameter', 'JWTExpiredSignature', 'JWTInvalidClaimRead', 'JWTInvalidClaimSub', 'JWTInvalidKeyOrAlgorithm', 'JWTInvalidSignature', 'JWTKeysError', 'JWTMissingRequiredClaim', 'MissingProcessingStepsError', 'MissingRequiredParameter', 'RenamedDatasetError', 'ResponseNotFound', 'ResponseNotReady', 'SearchFeatureNotAvailableError', 'TooBigContentError', 'TransformRowsProcessingError', 'UnexpectedApiError'] class ApiError(CustomError): def __init__(self, message: str, status_code: HTTPStatus, code: ApiErrorCode, cause: Optional[BaseException]=None, disclose_cause: bool=False): super().__init__(message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause) class AuthCheckHubRequestError(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'AuthCheckHubRequestError', cause=cause, disclose_cause=False) class DownloadIndexError(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'DownloadIndexError', cause=cause, disclose_cause=True) class ExternalAuthenticatedError(ApiError): def __init__(self, message: str): super().__init__(message, HTTPStatus.NOT_FOUND, 'ExternalAuthenticatedError') class ExternalUnauthenticatedError(ApiError): def __init__(self, message: str): super().__init__(message, HTTPStatus.UNAUTHORIZED, 'ExternalUnauthenticatedError') class InvalidParameterError(ApiError): def __init__(self, message: str): super().__init__(message, HTTPStatus.UNPROCESSABLE_ENTITY, 'InvalidParameter') class JWTKeysError(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'JWTKeysError', cause=cause, disclose_cause=False) class MissingRequiredParameterError(ApiError): def __init__(self, message: str): super().__init__(message, HTTPStatus.UNPROCESSABLE_ENTITY, 'MissingRequiredParameter') class ResponseNotFoundError(ApiError): def __init__(self, message: str): super().__init__(message, HTTPStatus.NOT_FOUND, 'ResponseNotFound') class RenamedDatasetError(ApiError): def __init__(self, message: str): super().__init__(message, HTTPStatus.NOT_FOUND, 'RenamedDatasetError') class ResponseNotReadyError(ApiError): def __init__(self, message: str): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'ResponseNotReady') class SearchFeatureNotAvailableError(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.BAD_REQUEST, 'SearchFeatureNotAvailableError', cause, True) 
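# --- Hedged usage sketch (added for illustration, not part of the original libapi.exceptions module) ---
# It shows how an ApiError subclass carries the HTTP status and error code that the API layer later
# serializes, using as_response() inherited from libcommon's CustomError.
# The helper name _example_error_payload is hypothetical.
def _example_error_payload() -> dict:
    error = ExternalAuthenticatedError('The dataset does not exist, or is not accessible with the current credentials (private or gated).')
    # For this class, status_code is HTTPStatus.NOT_FOUND and code is 'ExternalAuthenticatedError'.
    return {'status': error.status_code.value, 'error_code': error.code, 'body': error.as_response()}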
class TooBigContentError(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'TooBigContentError', cause=cause, disclose_cause=True) class TransformRowsProcessingError(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'TransformRowsProcessingError', cause, True) class JWTExpiredSignature(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.UNAUTHORIZED, 'JWTExpiredSignature', cause, True) class JWTInvalidClaimRead(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.UNAUTHORIZED, 'JWTInvalidClaimRead', cause, True) class JWTInvalidClaimSub(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.UNAUTHORIZED, 'JWTInvalidClaimSub', cause, True) class JWTInvalidKeyOrAlgorithm(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.UNAUTHORIZED, 'JWTInvalidKeyOrAlgorithm', cause, True) class JWTInvalidSignature(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.UNAUTHORIZED, 'JWTInvalidSignature', cause, True) class JWTMissingRequiredClaim(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.UNAUTHORIZED, 'JWTMissingRequiredClaim', cause, True) class UnexpectedApiError(ApiError): def __init__(self, message: str, cause: Optional[BaseException]=None): logging.error(message, exc_info=cause) super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'UnexpectedApiError', cause) # File: dataset-viewer-main/libs/libapi/src/libapi/jwt_token.py import logging from typing import Any, Optional, Union import httpx import jwt from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePrivateKey, EllipticCurvePublicKey from cryptography.hazmat.primitives.asymmetric.ed448 import Ed448PrivateKey, Ed448PublicKey from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey, Ed25519PublicKey from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey, RSAPublicKey from jwt.algorithms import ECAlgorithm, HMACAlgorithm, OKPAlgorithm, RSAAlgorithm, RSAPSSAlgorithm from libapi.exceptions import JWTExpiredSignature, JWTInvalidClaimRead, JWTInvalidClaimSub, JWTInvalidKeyOrAlgorithm, JWTInvalidSignature, JWTKeysError, JWTMissingRequiredClaim, UnexpectedApiError ASYMMETRIC_ALGORITHMS = (ECAlgorithm, OKPAlgorithm, RSAAlgorithm, RSAPSSAlgorithm) SYMMETRIC_ALGORITHMS = (HMACAlgorithm,) SupportedAlgorithm = Union[ECAlgorithm, OKPAlgorithm, RSAAlgorithm, RSAPSSAlgorithm, HMACAlgorithm] SupportedKey = Union[Ed448PrivateKey, Ed448PublicKey, Ed25519PrivateKey, Ed25519PublicKey, EllipticCurvePrivateKey, EllipticCurvePublicKey, RSAPrivateKey, RSAPublicKey, bytes] def is_public_key(key: SupportedKey) -> bool: return hasattr(key, 'public_bytes') def create_algorithm(algorithm_name: str) -> SupportedAlgorithm: try: algorithm = jwt.get_algorithm_by_name(algorithm_name) if not isinstance(algorithm, (*ASYMMETRIC_ALGORITHMS, *SYMMETRIC_ALGORITHMS)): raise NotImplementedError() except NotImplementedError as err: raise RuntimeError(f'Invalid algorithm for JWT verification: 
{algorithm_name} is not supported') from err return algorithm def _key_to_pem(key: SupportedKey, algorithm: SupportedAlgorithm) -> str: if isinstance(algorithm, SYMMETRIC_ALGORITHMS) or isinstance(key, bytes): return key.decode('utf-8') if not is_public_key(key): raise RuntimeError('Failed to parse JWT key: the provided key is a private key') return key.public_bytes(encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo).decode('utf-8') def parse_jwt_public_key_json(payload: Any, algorithm: SupportedAlgorithm) -> str: if not isinstance(payload, list) or not payload: raise ValueError('Payload must be a list of JWK formatted keys.') try: key = algorithm.from_jwk(payload[0]) except (jwt.InvalidKeyError, KeyError) as err: raise RuntimeError(f'Failed to parse JWT key: {err.args[0]}') from err return _key_to_pem(key, algorithm) def parse_jwt_public_key_pem(payload: str, algorithm: SupportedAlgorithm) -> str: try: key = algorithm.prepare_key(payload) except (jwt.InvalidKeyError, KeyError) as err: raise RuntimeError(f'Failed to parse JWT key: {err.args[0]}') from err return _key_to_pem(key, algorithm) def fetch_jwt_public_key_json(url: str, hf_timeout_seconds: Optional[float]=None) -> Any: try: response = httpx.get(url, timeout=hf_timeout_seconds) response.raise_for_status() return response.json() except Exception as err: raise RuntimeError(f'Failed to fetch the JWT public key from {url}. ') from err def get_jwt_public_keys(algorithm_name: Optional[str]=None, public_key_url: Optional[str]=None, additional_public_keys: Optional[list[str]]=None, timeout_seconds: Optional[float]=None) -> list[str]: try: keys: list[str] = [] if not algorithm_name: return keys algorithm = create_algorithm(algorithm_name) if public_key_url: payload = fetch_jwt_public_key_json(url=public_key_url, hf_timeout_seconds=timeout_seconds) keys.append(parse_jwt_public_key_json(payload=payload, algorithm=algorithm)) if additional_public_keys: keys.extend((parse_jwt_public_key_pem(payload=payload, algorithm=algorithm) for payload in additional_public_keys)) logging.debug(f"JWT public keys are: {', '.join(keys)}.") return keys except Exception as err: raise JWTKeysError('Failed to create the JWT public keys.') from err READ_PERMISSIONS = ['repo.content.read', 'repo.content.write', 'repo.read', 'repo.write'] def validate_jwt(dataset: str, token: Any, public_keys: list[str], algorithm: str, verify_exp: Optional[bool]=True) -> None: for public_key in public_keys: logging.debug(f'Trying to decode the JWT with key #{public_keys.index(public_key)}: {public_key}.') try: decoded = jwt.decode(jwt=token, key=public_key, algorithms=[algorithm], options={'require': ['exp', 'sub', 'permissions'], 'verify_exp': verify_exp}) logging.debug(f"Decoded JWT is: '{public_key}'.") break except jwt.exceptions.InvalidSignatureError as e: if public_key == public_keys[-1]: raise JWTInvalidSignature('The JWT signature verification failed. Check the signing key and the algorithm.', e) from e logging.debug(f"JWT signature verification failed with key: '{public_key}'. Trying next key.") except jwt.exceptions.MissingRequiredClaimError as e: raise JWTMissingRequiredClaim('A claim is missing in the JWT payload.', e) from e except jwt.exceptions.ExpiredSignatureError as e: raise JWTExpiredSignature('The JWT signature has expired. 
Try to refresh the token.', e) from e except (jwt.exceptions.InvalidKeyError, jwt.exceptions.InvalidAlgorithmError) as e: raise JWTInvalidKeyOrAlgorithm('The key used to verify the signature is not compatible with the algorithm. Check the signing key and the algorithm.', e) from e except Exception as e: raise UnexpectedApiError('An error has occurred while decoding the JWT.', e) from e sub = decoded.get('sub') if not isinstance(sub, str) or ((not sub.startswith('datasets/') or sub.removeprefix('datasets/') != dataset) and (not sub.startswith('/datasets/') or sub.removeprefix('/datasets/') != dataset)): raise JWTInvalidClaimSub("The 'sub' claim in JWT payload is invalid. It should be in the form 'datasets/<...dataset identifier...>' or '/datasets/<...dataset identifier...>'.") permissions = decoded.get('permissions') if not isinstance(permissions, dict): raise JWTMissingRequiredClaim("The 'permissions' claim in the JWT payload must be a dict.") if not any((permissions.get(permission) is True for permission in READ_PERMISSIONS)): raise JWTInvalidClaimRead('No permission in JWT payload is True. Not allowed to read the dataset.') # File: dataset-viewer-main/libs/libapi/src/libapi/request.py from libcommon.constants import MAX_NUM_ROWS_PER_PAGE from starlette.requests import Request from libapi.exceptions import InvalidParameterError, MissingRequiredParameterError from libapi.utils import is_non_empty_string def get_request_parameter_length(request: Request) -> int: try: length = int(request.query_params.get('length', MAX_NUM_ROWS_PER_PAGE)) except ValueError: raise InvalidParameterError("Parameter 'length' must be integer") if length < 0: raise InvalidParameterError("Parameter 'length' must be positive") elif length > MAX_NUM_ROWS_PER_PAGE: raise InvalidParameterError(f"Parameter 'length' must not be greater than {MAX_NUM_ROWS_PER_PAGE}") return length def get_request_parameter_offset(request: Request) -> int: try: offset = int(request.query_params.get('offset', 0)) except ValueError: raise InvalidParameterError("Parameter 'offset' must be integer") if offset < 0: raise InvalidParameterError(message="Parameter 'offset' must be positive") return offset def get_request_parameter(request: Request, parameter_name: str, required: bool=False, default: str='') -> str: parameter = request.query_params.get(parameter_name, default) if required: if not parameter or not is_non_empty_string(parameter): raise MissingRequiredParameterError(f"Parameter '{parameter_name}' is required") return parameter # File: dataset-viewer-main/libs/libapi/src/libapi/response.py import logging from typing import Optional import pyarrow as pa from datasets import Features from libcommon.constants import MAX_NUM_ROWS_PER_PAGE, ROW_IDX_COLUMN from libcommon.dtos import PaginatedResponse from libcommon.storage_client import StorageClient from libcommon.viewer_utils.features import to_features_list from libapi.utils import to_rows_list async def create_response(dataset: str, revision: str, config: str, split: str, storage_client: StorageClient, pa_table: pa.Table, offset: int, features: Features, unsupported_columns: list[str], num_rows_total: int, partial: bool, use_row_idx_column: bool=False, truncated_columns: Optional[list[str]]=None) -> PaginatedResponse: if set(pa_table.column_names).intersection(set(unsupported_columns)): raise RuntimeError('The pyarrow table contains unsupported columns. 
They should have been ignored in the row group reader.') logging.debug(f'create response for dataset={dataset!r} config={config!r} split={split!r}') return {'features': [feature_item for feature_item in to_features_list(features) if not use_row_idx_column or feature_item['name'] != ROW_IDX_COLUMN], 'rows': await to_rows_list(pa_table=pa_table, dataset=dataset, revision=revision, config=config, split=split, storage_client=storage_client, offset=offset, features=features, unsupported_columns=unsupported_columns, row_idx_column=ROW_IDX_COLUMN if use_row_idx_column else None, truncated_columns=truncated_columns), 'num_rows_total': num_rows_total, 'num_rows_per_page': MAX_NUM_ROWS_PER_PAGE, 'partial': partial} # File: dataset-viewer-main/libs/libapi/src/libapi/routes/metrics.py import logging from libcommon.prometheus import Prometheus from prometheus_client import CONTENT_TYPE_LATEST from starlette.requests import Request from starlette.responses import Response from libapi.utils import Endpoint def create_metrics_endpoint() -> Endpoint: prometheus = Prometheus() async def metrics_endpoint(_: Request) -> Response: logging.info('/metrics') return Response(prometheus.getLatestContent(), headers={'Content-Type': CONTENT_TYPE_LATEST}) return metrics_endpoint # File: dataset-viewer-main/libs/libapi/src/libapi/rows_utils.py from collections.abc import Callable from functools import partial from typing import Any, Optional import anyio from datasets import Features from libcommon.dtos import Row from libcommon.storage_client import StorageClient from libcommon.viewer_utils.features import get_cell_value from tqdm.contrib.concurrent import thread_map def _transform_row(row_idx_and_row: tuple[int, Row], dataset: str, revision: str, config: str, split: str, features: Features, storage_client: StorageClient, offset: int, row_idx_column: Optional[str]) -> Row: (row_idx, row) = row_idx_and_row transformed_row = {featureName: get_cell_value(dataset=dataset, revision=revision, config=config, split=split, row_idx=offset + row_idx if row_idx_column is None else row[row_idx_column], cell=row[featureName] if featureName in row else None, featureName=featureName, fieldType=fieldType, storage_client=storage_client) for (featureName, fieldType) in features.items()} if row_idx_column and row_idx_column not in transformed_row: transformed_row |= {row_idx_column: row[row_idx_column]} return transformed_row async def transform_rows(dataset: str, revision: str, config: str, split: str, rows: list[Row], features: Features, storage_client: StorageClient, offset: int, row_idx_column: Optional[str]) -> list[Row]: fn = partial(_transform_row, dataset=dataset, revision=revision, config=config, split=split, features=features, storage_client=storage_client, offset=offset, row_idx_column=row_idx_column) if 'Audio(' in str(features) or 'Image(' in str(features): desc = f'_transform_row for {dataset}' _thread_map = partial(thread_map, desc=desc, total=len(rows)) return await anyio.to_thread.run_sync(_thread_map, fn, enumerate(rows)) else: def _map(func: Callable[[Any], Any], *iterables: Any) -> list[Row]: return list(map(func, *iterables)) return await anyio.to_thread.run_sync(_map, fn, enumerate(rows)) # File: dataset-viewer-main/libs/libapi/src/libapi/utils.py import logging from collections.abc import Callable, Coroutine from http import HTTPStatus from typing import Any, Optional import pyarrow as pa from datasets import Features from libcommon.dtos import Priority, RowItem from libcommon.exceptions import CustomError from 
libcommon.operations import update_dataset from libcommon.orchestrator import has_pending_ancestor_jobs from libcommon.simple_cache import CachedArtifactNotFoundError, CacheEntry, has_some_cache from libcommon.simple_cache import get_response as get_cached_response from libcommon.storage_client import StorageClient from libcommon.utils import orjson_dumps from starlette.requests import Request from starlette.responses import JSONResponse, Response from libapi.exceptions import ResponseNotFoundError, ResponseNotReadyError, TransformRowsProcessingError from libapi.rows_utils import transform_rows class OrjsonResponse(JSONResponse): def render(self, content: Any) -> bytes: return orjson_dumps(content=content) def get_response(content: Any, status_code: int=200, max_age: int=0) -> Response: headers = {'Cache-Control': f'max-age={max_age}'} if max_age > 0 else {'Cache-Control': 'no-store'} return OrjsonResponse(content=content, status_code=status_code, headers=headers) def get_json_response(content: Any, status_code: HTTPStatus=HTTPStatus.OK, max_age: int=0, error_code: Optional[str]=None, revision: Optional[str]=None, headers: Optional[dict[str, str]]=None) -> Response: if not headers: headers = {} headers['Cache-Control'] = f'max-age={max_age}' if max_age > 0 else 'no-store' if error_code is not None: headers['X-Error-Code'] = error_code if revision is not None: headers['X-Revision'] = revision return OrjsonResponse(content=content, status_code=status_code.value, headers=headers) EXPOSED_HEADERS = ['X-Error-Code', 'X-Revision'] def get_json_ok_response(content: Any, max_age: int=0, revision: Optional[str]=None, headers: Optional[dict[str, str]]=None) -> Response: return get_json_response(content=content, max_age=max_age, revision=revision, headers=headers) def get_json_error_response(content: Any, status_code: HTTPStatus=HTTPStatus.OK, max_age: int=0, error_code: Optional[str]=None, revision: Optional[str]=None) -> Response: return get_json_response(content=content, status_code=status_code, max_age=max_age, error_code=error_code, revision=revision) def get_json_api_error_response(error: CustomError, max_age: int=0, revision: Optional[str]=None) -> Response: return get_json_error_response(content=error.as_response(), status_code=error.status_code, max_age=max_age, error_code=error.code, revision=revision) def is_non_empty_string(string: Any) -> bool: return isinstance(string, str) and bool(string.strip()) def are_valid_parameters(parameters: list[Any]) -> bool: return all((is_non_empty_string(s) for s in parameters)) def try_backfill_dataset_then_raise(processing_step_name: str, dataset: str, hf_endpoint: str, blocked_datasets: list[str], hf_token: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, storage_clients: Optional[list[StorageClient]]=None) -> None: if has_pending_ancestor_jobs(dataset=dataset, processing_step_name=processing_step_name): logging.debug('Cache entry not found but some jobs are still in progress, so it could exist in the future') raise ResponseNotReadyError('The server is busier than usual and the response is not ready yet. 
Please retry later.') logging.debug('No pending job that could create the expected cache entry') if has_some_cache(dataset=dataset): logging.debug('Some cache entries exist, so the dataset is supported, but that cache entry will never be created') raise ResponseNotFoundError('Not found.') logging.debug('No cache entry found') update_dataset(dataset=dataset, blocked_datasets=blocked_datasets, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds, priority=Priority.NORMAL, storage_clients=storage_clients) logging.debug("The dataset is supported and it's being backfilled") raise ResponseNotReadyError('The server is busier than usual and the response is not ready yet. Please retry later.') def get_cache_entry_from_step(processing_step_name: str, dataset: str, config: Optional[str], split: Optional[str], hf_endpoint: str, blocked_datasets: list[str], hf_token: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, storage_clients: Optional[list[StorageClient]]=None) -> CacheEntry: try: response = get_cached_response(kind=processing_step_name, dataset=dataset, config=config, split=split) except CachedArtifactNotFoundError: if not dataset.startswith('CyberHarem/'): try_backfill_dataset_then_raise(processing_step_name=processing_step_name, dataset=dataset, hf_endpoint=hf_endpoint, blocked_datasets=blocked_datasets, hf_timeout_seconds=hf_timeout_seconds, hf_token=hf_token, storage_clients=storage_clients) return response Endpoint = Callable[[Request], Coroutine[Any, Any, Response]] async def to_rows_list(pa_table: pa.Table, dataset: str, revision: str, config: str, split: str, offset: int, features: Features, unsupported_columns: list[str], storage_client: StorageClient, row_idx_column: Optional[str]=None, truncated_columns: Optional[list[str]]=None) -> list[RowItem]: num_rows = pa_table.num_rows for (idx, (column, feature)) in enumerate(features.items()): if column in unsupported_columns: pa_table = pa_table.add_column(idx, column, pa.array([None] * num_rows)) try: transformed_rows = await transform_rows(dataset=dataset, revision=revision, config=config, split=split, rows=pa_table.to_pylist(), features=features, storage_client=storage_client, offset=offset, row_idx_column=row_idx_column) except Exception as err: raise TransformRowsProcessingError('Server error while post-processing the split rows. 
Please report the issue.') from err return [{'row_idx': idx + offset if row_idx_column is None else row.pop(row_idx_column), 'row': row, 'truncated_cells': truncated_columns or []} for (idx, row) in enumerate(transformed_rows)] # File: dataset-viewer-main/libs/libcommon/src/libcommon/cloudfront.py import datetime from functools import partial from typing import Optional import botocore.signers from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15 from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey from cryptography.hazmat.primitives.hashes import SHA1 from cryptography.hazmat.primitives.serialization import load_pem_private_key from libcommon.config import CloudFrontConfig from libcommon.utils import get_expires class InvalidPrivateKeyError(ValueError): pass padding = PKCS1v15() algorithm = SHA1() class CloudFrontSigner: _expiration_seconds: int _signer: botocore.signers.CloudFrontSigner def __init__(self, key_pair_id: str, private_key: str, expiration_seconds: int) -> None: try: pk = load_pem_private_key(private_key.encode('utf8'), password=None, backend=default_backend()) except ValueError as e: raise InvalidPrivateKeyError('Invalid private key') from e if not isinstance(pk, RSAPrivateKey): raise InvalidPrivateKeyError('Expected an RSA private key') self._expiration_seconds = expiration_seconds self._signer = botocore.signers.CloudFrontSigner(key_pair_id, partial(pk.sign, padding=padding, algorithm=algorithm)) def _sign_url(self, url: str, date_less_than: datetime.datetime) -> str: return self._signer.generate_presigned_url(url, date_less_than=date_less_than) def sign_url(self, url: str) -> str: date_less_than = get_expires(seconds=self._expiration_seconds) return self._sign_url(url=url, date_less_than=date_less_than) def get_cloudfront_signer(cloudfront_config: CloudFrontConfig) -> Optional[CloudFrontSigner]: return CloudFrontSigner(key_pair_id=cloudfront_config.key_pair_id, private_key=cloudfront_config.private_key, expiration_seconds=cloudfront_config.expiration_seconds) if cloudfront_config.key_pair_id and cloudfront_config.private_key else None # File: dataset-viewer-main/libs/libcommon/src/libcommon/config.py import logging from dataclasses import dataclass, field from typing import Literal, Optional from environs import Env from marshmallow.validate import OneOf STORAGE_PROTOCOL_VALUES: list[str] = ['file', 's3'] StorageProtocol = Literal['file', 's3'] ASSETS_BASE_URL = 'http://localhost/assets' ASSETS_STORAGE_PROTOCOL: StorageProtocol = 'file' ASSETS_STORAGE_ROOT = '/storage/assets' @dataclass(frozen=True) class AssetsConfig: base_url: str = ASSETS_BASE_URL storage_protocol: StorageProtocol = ASSETS_STORAGE_PROTOCOL storage_root: str = ASSETS_STORAGE_ROOT @classmethod def from_env(cls) -> 'AssetsConfig': env = Env(expand_vars=True) with env.prefixed('ASSETS_'): return cls(base_url=env.str(name='BASE_URL', default=ASSETS_BASE_URL), storage_protocol=env.str(name='STORAGE_PROTOCOL', default=ASSETS_STORAGE_PROTOCOL, validate=OneOf(STORAGE_PROTOCOL_VALUES, error='ASSETS_STORAGE_PROTOCOL must be one of: {choices}')), storage_root=env.str(name='STORAGE_ROOT', default=ASSETS_STORAGE_ROOT)) S3_ACCESS_KEY_ID = None S3_SECRET_ACCESS_KEY = None S3_REGION_NAME = 'us-east-1' @dataclass(frozen=True) class S3Config: access_key_id: Optional[str] = S3_ACCESS_KEY_ID secret_access_key: Optional[str] = S3_SECRET_ACCESS_KEY region_name: str = S3_REGION_NAME @classmethod def from_env(cls) -> 'S3Config': env = 
Env(expand_vars=True) with env.prefixed('S3_'): return cls(access_key_id=env.str(name='ACCESS_KEY_ID', default=S3_ACCESS_KEY_ID), secret_access_key=env.str(name='SECRET_ACCESS_KEY', default=S3_SECRET_ACCESS_KEY), region_name=env.str(name='REGION_NAME', default=S3_REGION_NAME)) CACHED_ASSETS_BASE_URL = 'http://localhost/cached-assets' CACHED_ASSETS_STORAGE_PROTOCOL: StorageProtocol = 'file' CACHED_ASSETS_STORAGE_ROOT = '/storage/cached-assets' @dataclass(frozen=True) class CachedAssetsConfig: base_url: str = CACHED_ASSETS_BASE_URL storage_protocol: StorageProtocol = CACHED_ASSETS_STORAGE_PROTOCOL storage_root: str = CACHED_ASSETS_STORAGE_ROOT @classmethod def from_env(cls) -> 'CachedAssetsConfig': env = Env(expand_vars=True) with env.prefixed('CACHED_ASSETS_'): return cls(base_url=env.str(name='BASE_URL', default=CACHED_ASSETS_BASE_URL), storage_protocol=env.str(name='STORAGE_PROTOCOL', default=CACHED_ASSETS_STORAGE_PROTOCOL, validate=OneOf(STORAGE_PROTOCOL_VALUES, error='CACHED_ASSETS_STORAGE_PROTOCOL must be one of: {choices}')), storage_root=env.str(name='STORAGE_ROOT', default=CACHED_ASSETS_STORAGE_ROOT)) CLOUDFRONT_EXPIRATION_SECONDS = 60 * 60 * 24 CLOUDFRONT_KEY_PAIR_ID = None CLOUDFRONT_PRIVATE_KEY = None @dataclass(frozen=True) class CloudFrontConfig: expiration_seconds: int = CLOUDFRONT_EXPIRATION_SECONDS key_pair_id: Optional[str] = CLOUDFRONT_KEY_PAIR_ID private_key: Optional[str] = CLOUDFRONT_PRIVATE_KEY @classmethod def from_env(cls) -> 'CloudFrontConfig': env = Env(expand_vars=True) with env.prefixed('CLOUDFRONT_'): return cls(expiration_seconds=env.int(name='EXPIRATION_SECONDS', default=CLOUDFRONT_EXPIRATION_SECONDS), key_pair_id=env.str(name='KEY_PAIR_ID', default=CLOUDFRONT_KEY_PAIR_ID), private_key=env.str(name='PRIVATE_KEY', default=CLOUDFRONT_PRIVATE_KEY)) PARQUET_METADATA_STORAGE_DIRECTORY = None @dataclass(frozen=True) class ParquetMetadataConfig: storage_directory: Optional[str] = PARQUET_METADATA_STORAGE_DIRECTORY @classmethod def from_env(cls) -> 'ParquetMetadataConfig': env = Env(expand_vars=True) with env.prefixed('PARQUET_METADATA_'): return cls(storage_directory=env.str(name='STORAGE_DIRECTORY', default=PARQUET_METADATA_STORAGE_DIRECTORY)) ROWS_INDEX_MAX_ARROW_DATA_IN_MEMORY = 300000000 @dataclass(frozen=True) class RowsIndexConfig: max_arrow_data_in_memory: int = ROWS_INDEX_MAX_ARROW_DATA_IN_MEMORY @classmethod def from_env(cls) -> 'RowsIndexConfig': env = Env(expand_vars=True) with env.prefixed('ROWS_INDEX_'): return cls(max_arrow_data_in_memory=env.int(name='MAX_ARROW_DATA_IN_MEMORY', default=ROWS_INDEX_MAX_ARROW_DATA_IN_MEMORY)) COMMON_BLOCKED_DATASETS: list[str] = [] COMMON_HF_ENDPOINT = 'https://huggingface.co' COMMON_HF_TOKEN = None @dataclass(frozen=True) class CommonConfig: blocked_datasets: list[str] = field(default_factory=COMMON_BLOCKED_DATASETS.copy) hf_endpoint: str = COMMON_HF_ENDPOINT hf_token: Optional[str] = COMMON_HF_TOKEN @classmethod def from_env(cls) -> 'CommonConfig': env = Env(expand_vars=True) with env.prefixed('COMMON_'): return cls(blocked_datasets=env.list(name='BLOCKED_DATASETS', default=COMMON_BLOCKED_DATASETS.copy()), hf_endpoint=env.str(name='HF_ENDPOINT', default=COMMON_HF_ENDPOINT), hf_token=env.str(name='HF_TOKEN', default=COMMON_HF_TOKEN)) LOG_LEVEL = logging.INFO @dataclass(frozen=True) class LogConfig: level: int = LOG_LEVEL @classmethod def from_env(cls) -> 'LogConfig': env = Env(expand_vars=True) with env.prefixed('LOG_'): return cls(level=env.log_level(name='LEVEL', default=LOG_LEVEL)) CACHE_MONGO_DATABASE = 
'dataset_viewer_cache' CACHE_MONGO_URL = 'mongodb://localhost:27017' @dataclass(frozen=True) class CacheConfig: mongo_database: str = CACHE_MONGO_DATABASE mongo_url: str = CACHE_MONGO_URL @classmethod def from_env(cls) -> 'CacheConfig': env = Env(expand_vars=True) with env.prefixed('CACHE_'): return cls(mongo_database=env.str(name='MONGO_DATABASE', default=CACHE_MONGO_DATABASE), mongo_url=env.str(name='MONGO_URL', default=CACHE_MONGO_URL)) QUEUE_MONGO_DATABASE = 'dataset_viewer_queue' QUEUE_MONGO_URL = 'mongodb://localhost:27017' @dataclass(frozen=True) class QueueConfig: mongo_database: str = QUEUE_MONGO_DATABASE mongo_url: str = QUEUE_MONGO_URL @classmethod def from_env(cls) -> 'QueueConfig': env = Env(expand_vars=True) with env.prefixed('QUEUE_'): return cls(mongo_database=env.str(name='MONGO_DATABASE', default=QUEUE_MONGO_DATABASE), mongo_url=env.str(name='MONGO_URL', default=QUEUE_MONGO_URL)) # File: dataset-viewer-main/libs/libcommon/src/libcommon/constants.py CACHE_COLLECTION_RESPONSES = 'cachedResponsesBlue' CACHE_MONGOENGINE_ALIAS = 'cache' HF_DATASETS_CACHE_APPNAME = 'hf_datasets_cache' PARQUET_METADATA_CACHE_APPNAME = 'datasets_server_parquet_metadata' DESCRIPTIVE_STATISTICS_CACHE_APPNAME = 'dataset_viewer_descriptive_statistics' DUCKDB_INDEX_CACHE_APPNAME = 'dataset_viewer_duckdb_index' DUCKDB_INDEX_JOB_RUNNER_SUBDIRECTORY = 'job_runner' CACHE_METRICS_COLLECTION = 'cacheTotalMetric' TYPE_STATUS_AND_DATASET_STATUS_JOB_COUNTS_COLLECTION = 'jobTotalMetric' WORKER_TYPE_JOB_COUNTS_COLLECTION = 'workerTypeJobCounts' QUEUE_COLLECTION_JOBS = 'jobsBlue' QUEUE_COLLECTION_PAST_JOBS = 'pastJobs' QUEUE_COLLECTION_LOCKS = 'locks' QUEUE_COLLECTION_DATASET_BLOCKAGES = 'datasetBlockages' QUEUE_MONGOENGINE_ALIAS = 'queue' QUEUE_TTL_SECONDS = 600 LOCK_TTL_SECONDS_NO_OWNER = 600 LOCK_TTL_SECONDS_TO_START_JOB = 600 LOCK_TTL_SECONDS_TO_WRITE_ON_GIT_BRANCH = 3600 DATASET_SEPARATOR = '--' DEFAULT_DIFFICULTY = 50 DEFAULT_DIFFICULTY_MAX = 100 DEFAULT_DIFFICULTY_MIN = 0 DEFAULT_INPUT_TYPE = 'dataset' DEFAULT_JOB_RUNNER_VERSION = 1 DIFFICULTY_BONUS_BY_FAILED_RUNS = 20 MIN_BYTES_FOR_BONUS_DIFFICULTY = 3000000000 PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100 PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100 PROCESSING_STEP_CONFIG_PARQUET_AND_INFO_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100 PARQUET_REVISION = 'refs/convert/parquet' TAG_NFAA_CONTENT = 'not-for-all-audiences' TAG_NFAA_SYNONYMS = [TAG_NFAA_CONTENT, 'nsfw', 'porn', 'hentai', 'inappropriate'] DEFAULT_MAX_FAILED_RUNS = 3 LARGE_MAX_FAILED_RUNS = 30 MAX_FAILED_RUNS_PER_ERROR_CODE = {'RetryableConfigNamesError': DEFAULT_MAX_FAILED_RUNS, 'ConnectionError': DEFAULT_MAX_FAILED_RUNS, 'ExternalServerError': DEFAULT_MAX_FAILED_RUNS, 'JobManagerCrashedError': DEFAULT_MAX_FAILED_RUNS, 'StreamingRowsError': DEFAULT_MAX_FAILED_RUNS, 'CreateCommitError': LARGE_MAX_FAILED_RUNS, 'HfHubError': LARGE_MAX_FAILED_RUNS, 'LockedDatasetTimeoutError': LARGE_MAX_FAILED_RUNS, 'PreviousStepStillProcessingError': LARGE_MAX_FAILED_RUNS} ERROR_CODES_TO_RETRY = list(MAX_FAILED_RUNS_PER_ERROR_CODE.keys()) CONFIG_HAS_VIEWER_KIND = 'config-size' CONFIG_INFO_KIND = 'config-info' CONFIG_PARQUET_METADATA_KIND = 'config-parquet-metadata' CONFIG_SPLIT_NAMES_KIND = 'config-split-names' DATASET_CONFIG_NAMES_KIND = 'dataset-config-names' DATASET_INFO_KIND = 'dataset-info' SPLIT_DUCKDB_INDEX_KIND = 'split-duckdb-index' SPLIT_HAS_PREVIEW_KIND = 'split-first-rows' SPLIT_HAS_SEARCH_KIND = 'split-duckdb-index' 
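# --- Hedged sketch (added for illustration, not part of the original constants module) ---
# It shows how the retry constants above (MAX_FAILED_RUNS_PER_ERROR_CODE, ERROR_CODES_TO_RETRY) can be
# combined to decide whether a failed run may be retried. The real retry logic lives in the queue code;
# _example_should_retry is a hypothetical helper name.
def _example_should_retry(error_code: str, failed_runs: int) -> bool:
    # Only error codes listed in MAX_FAILED_RUNS_PER_ERROR_CODE are retryable,
    # and only while the number of failed runs stays below the per-code maximum.
    max_failed_runs = MAX_FAILED_RUNS_PER_ERROR_CODE.get(error_code)
    return max_failed_runs is not None and failed_runs < max_failed_runs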
SPLIT_HAS_STATISTICS_KIND = 'split-descriptive-statistics' ROW_IDX_COLUMN = '__hf_index_id' HF_FTS_SCORE = '__hf_fts_score' CROISSANT_MAX_CONFIGS = 100 LOADING_METHODS_MAX_CONFIGS = 100 MAX_NUM_ROWS_PER_PAGE = 100 MAX_COLUMN_NAME_LENGTH = 500 LONG_DURATION_PROMETHEUS_HISTOGRAM_BUCKETS = (0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, 750.0, 1000.0, 2500.0, 5000.0, float('inf')) YAML_FIELDS_TO_CHECK = ['dataset_info', 'configs', 'viewer', 'language'] # File: dataset-viewer-main/libs/libcommon/src/libcommon/croissant_utils.py from collections.abc import Mapping from typing import Any, Union from datasets import ClassLabel, Image, Sequence, Value def get_record_set(dataset: str, config_name: str) -> str: if dataset != config_name: return config_name else: return f'record_set_{config_name}' MAX_COLUMNS = 1000 def truncate_features_from_croissant_crumbs_response(content: Mapping[str, Any]) -> None: if isinstance(content, dict) and 'recordSet' in content and isinstance(content['recordSet'], list): for record in content['recordSet']: if isinstance(record, dict) and 'field' in record and isinstance(record['field'], list) and (len(record['field']) > MAX_COLUMNS): num_columns = len(record['field']) record['field'] = record['field'][:MAX_COLUMNS] record['description'] += f"\n- {num_columns - MAX_COLUMNS} skipped column{('s' if num_columns - MAX_COLUMNS > 1 else '')} (max number of columns reached)" HF_TO_CROISSANT_VALUE_TYPE = {'binary': 'sc:Text', 'bool': 'sc:Boolean', 'float8': 'sc:Float', 'float16': 'sc:Float', 'float32': 'sc:Float', 'float64': 'sc:Float', 'int16': 'sc:Integer', 'int32': 'sc:Integer', 'int64': 'sc:Integer', 'large_string': 'sc:Text', 'string': 'sc:Text'} def feature_to_croissant_field(distribution_name: str, field_name: str, column: str, feature: Any) -> Union[dict[str, Any], None]: if isinstance(feature, Value) and feature.dtype in HF_TO_CROISSANT_VALUE_TYPE: return {'@type': 'cr:Field', '@id': field_name, 'name': field_name, 'description': f"Column '{column}' from the Hugging Face parquet file.", 'dataType': HF_TO_CROISSANT_VALUE_TYPE[feature.dtype], 'source': {'fileSet': {'@id': distribution_name}, 'extract': {'column': column}}} elif isinstance(feature, Image): return {'@type': 'cr:Field', '@id': field_name, 'name': field_name, 'description': f"Image column '{column}' from the Hugging Face parquet file.", 'dataType': 'sc:ImageObject', 'source': {'fileSet': {'@id': distribution_name}, 'extract': {'column': column}, 'transform': {'jsonPath': 'bytes'}}} elif isinstance(feature, ClassLabel): return {'@type': 'cr:Field', '@id': field_name, 'name': field_name, 'description': f"ClassLabel column '{column}' from the Hugging Face parquet file.\nLabels:\n" + ', '.join((f'{name} ({i})' for (i, name) in enumerate(feature.names))), 'dataType': 'sc:Integer', 'source': {'fileSet': {'@id': distribution_name}, 'extract': {'column': column}}} elif isinstance(feature, (Sequence, list)): if isinstance(feature, Sequence): sub_feature = feature.feature else: if len(feature) != 1: return None sub_feature = feature[0] field = feature_to_croissant_field(distribution_name, field_name, column, sub_feature) if field: field['repeated'] = True return field return None # File: dataset-viewer-main/libs/libcommon/src/libcommon/dtos.py import enum from collections.abc import Mapping from dataclasses import dataclass from datetime import datetime from http import HTTPStatus from typing import Any, Optional, TypedDict Row = dict[str, Any] 
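# --- Hedged usage sketch (added for illustration, not part of the original modules) ---
# It shows how feature_to_croissant_field from libcommon.croissant_utils (defined above) maps a
# datasets Value feature to a Croissant field description. The distribution and field identifiers
# ('parquet-files', 'field/text') are hypothetical.
def _example_croissant_field() -> dict:
    from datasets import Value
    from libcommon.croissant_utils import feature_to_croissant_field
    # A plain string column named 'text', exposed through a parquet fileSet distribution.
    field = feature_to_croissant_field('parquet-files', 'field/text', 'text', Value('string'))
    assert field is not None and field['dataType'] == 'sc:Text'
    return field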
@dataclass class RowsContent: rows: list[Row] all_fetched: bool truncated_columns: list[str] class Status(str, enum.Enum): WAITING = 'waiting' STARTED = 'started' class Priority(str, enum.Enum): HIGH = 'high' NORMAL = 'normal' LOW = 'low' class WorkerSize(str, enum.Enum): heavy = 'heavy' medium = 'medium' light = 'light' class JobParams(TypedDict): dataset: str revision: str config: Optional[str] split: Optional[str] class JobInfo(TypedDict): job_id: str type: str params: JobParams priority: Priority difficulty: int started_at: Optional[datetime] class FlatJobInfo(TypedDict): job_id: str type: str dataset: str revision: str config: Optional[str] split: Optional[str] priority: str status: str difficulty: int created_at: datetime class JobOutput(TypedDict): content: Mapping[str, Any] http_status: HTTPStatus error_code: Optional[str] details: Optional[Mapping[str, Any]] progress: Optional[float] class JobResult(TypedDict): job_info: JobInfo job_runner_version: int is_success: bool output: Optional[JobOutput] duration: Optional[float] class SplitHubFile(TypedDict): dataset: str config: str split: str url: str filename: str size: int class RowItem(TypedDict): row_idx: int row: Row truncated_cells: list[str] class FeatureItem(TypedDict): feature_idx: int name: str type: dict[str, Any] class PaginatedResponse(TypedDict): features: list[FeatureItem] rows: list[RowItem] num_rows_total: int num_rows_per_page: int partial: bool class DatasetItem(TypedDict): dataset: str class ConfigItem(DatasetItem): config: Optional[str] class SplitItem(ConfigItem): split: Optional[str] class FullConfigItem(DatasetItem): config: str class FullSplitItem(FullConfigItem): split: str class SplitFirstRowsResponse(FullSplitItem): features: list[FeatureItem] rows: list[RowItem] truncated: bool # File: dataset-viewer-main/libs/libcommon/src/libcommon/duckdb_utils.py from libcommon.parquet_utils import PARTIAL_PREFIX, parquet_export_is_partial def duckdb_index_is_partial(duckdb_index_url: str) -> bool: (_, duckdb_index_file_name) = duckdb_index_url.rsplit('/', 1) return parquet_export_is_partial(duckdb_index_url) or duckdb_index_file_name.startswith(PARTIAL_PREFIX) # File: dataset-viewer-main/libs/libcommon/src/libcommon/exceptions.py import logging import sys import traceback from http import HTTPStatus from typing import Literal, Optional, TypedDict, Union class ErrorResponseWithoutCause(TypedDict): error: str class ErrorResponseWithCause(ErrorResponseWithoutCause, total=False): cause_exception: str cause_message: str cause_traceback: list[str] ErrorResponse = Union[ErrorResponseWithoutCause, ErrorResponseWithCause] class LoggedError(Exception): def __init__(self, message: str): self.message = message logging.debug(self.message) super().__init__(self.message) class CustomError(LoggedError): def __init__(self, message: str, status_code: HTTPStatus, code: str, cause: Optional[BaseException]=None, disclose_cause: Optional[bool]=None): super().__init__(message) self.exception = type(self).__name__ self.status_code = status_code self.code = code self.message = str(self) self.disclose_cause = disclose_cause if disclose_cause is not None else cause is not None if cause is not None: self.cause_exception: Optional[str] = type(cause).__name__ self.cause_message: Optional[str] = str(cause) (t, v, tb) = sys.exc_info() self.cause_traceback: Optional[list[str]] = traceback.format_exception(t, v, tb) else: self.cause_exception = None self.cause_message = None self.cause_traceback = None def as_response_with_cause(self) -> 
ErrorResponseWithCause: error: ErrorResponseWithCause = {'error': self.message} if self.cause_exception is not None: error['cause_exception'] = self.cause_exception if self.cause_message is not None: error['cause_message'] = self.cause_message if self.cause_traceback is not None: error['cause_traceback'] = self.cause_traceback return error def as_response_without_cause(self) -> ErrorResponseWithoutCause: return {'error': self.message} def as_response(self) -> ErrorResponse: return self.as_response_with_cause() if self.disclose_cause else self.as_response_without_cause() CacheableErrorCode = Literal['CacheDirectoryNotInitializedError', 'ComputationError', 'ConfigNamesError', 'ConfigNotFoundError', 'CreateCommitError', 'DataFilesNotFoundError', 'DatasetGenerationError', 'DatasetGenerationCastError', 'DatasetInBlockListError', 'DatasetNotFoundError', 'DatasetWithScriptNotSupportedError', 'DatasetWithTooComplexDataFilesPatternsError', 'DatasetWithTooManyConfigsError', 'DatasetWithTooManyParquetFilesError', 'DatasetWithTooManySplitsError', 'DiskError', 'DuckDBIndexFileNotFoundError', 'EmptyDatasetError', 'ExternalServerError', 'FeaturesError', 'FeaturesResponseEmptyError', 'FileFormatMismatchBetweenSplitsError', 'FileSystemError', 'HfHubError', 'InfoError', 'JobManagerCrashedError', 'JobManagerExceededMaximumDurationError', 'LockedDatasetTimeoutError', 'MissingSpawningTokenError', 'NoSupportedFeaturesError', 'NotSupportedDisabledRepositoryError', 'NotSupportedDisabledViewerError', 'NotSupportedPrivateRepositoryError', 'NotSupportedRepositoryNotFoundError', 'NotSupportedTagNFAAError', 'NormalRowsError', 'ParameterMissingError', 'ParquetResponseEmptyError', 'PresidioScanNotEnabledForThisDataset', 'PreviousStepFormatError', 'PreviousStepStatusError', 'PreviousStepStillProcessingError', 'PolarsParquetReadError', 'RetryableConfigNamesError', 'RowsPostProcessingError', 'SplitsNamesError', 'SplitNamesFromStreamingError', 'SplitNotFoundError', 'SplitParquetSchemaMismatchError', 'SplitWithTooBigParquetError', 'StreamingRowsError', 'TooBigContentError', 'TooLongColumnNameError', 'TooManyColumnsError', 'UnexpectedError'] class CacheableError(CustomError): def __init__(self, message: str, status_code: HTTPStatus, code: CacheableErrorCode, cause: Optional[BaseException]=None, disclose_cause: bool=False): super().__init__(message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause) class CacheDirectoryNotInitializedError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'CacheDirectoryNotInitializedError', cause, True) class ConfigNamesError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'ConfigNamesError', cause, True) class ConfigNotFoundError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message=message, status_code=HTTPStatus.NOT_FOUND, code='ConfigNotFoundError', cause=cause, disclose_cause=False) class CreateCommitError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'CreateCommitError', cause, False) class DataFilesNotFoundError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'DataFilesNotFoundError', cause, False) class 
DatasetGenerationError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'DatasetGenerationError', cause, True) class DatasetGenerationCastError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'DatasetGenerationCastError', cause, True) class DatasetNotFoundError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message=message, status_code=HTTPStatus.NOT_FOUND, code='DatasetNotFoundError', cause=cause, disclose_cause=False) class DatasetWithTooManyConfigsError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'DatasetWithTooManyConfigsError', cause, True) class DatasetWithTooManySplitsError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'DatasetWithTooManySplitsError', cause, True) class DatasetWithTooManyParquetFilesError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'DatasetWithTooManyParquetFilesError', cause, True) class DuckDBIndexFileNotFoundError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'DuckDBIndexFileNotFoundError', cause, False) class DiskError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message=message, status_code=HTTPStatus.INTERNAL_SERVER_ERROR, code='DiskError', cause=cause, disclose_cause=False) class EmptyDatasetError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'EmptyDatasetError', cause, True) class ExternalServerError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'ExternalServerError', cause, False) class FeaturesError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'FeaturesError', cause, True) class FeaturesResponseEmptyError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'FeaturesResponseEmptyError', cause, True) class FileFormatMismatchBetweenSplitsError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'FileFormatMismatchBetweenSplitsError', cause, False) class FileSystemError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'FileSystemError', cause, False) class HfHubError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'HfHubError', cause, False) class InfoError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'InfoError', cause, True) class JobManagerCrashedError(CacheableError): def 
__init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message=message, status_code=HTTPStatus.NOT_IMPLEMENTED, code='JobManagerCrashedError', cause=cause, disclose_cause=False) class JobManagerExceededMaximumDurationError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message=message, status_code=HTTPStatus.NOT_IMPLEMENTED, code='JobManagerExceededMaximumDurationError', cause=cause, disclose_cause=False) class LockedDatasetTimeoutError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'LockedDatasetTimeoutError', cause, True) class MissingSpawningTokenError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'MissingSpawningTokenError', cause, False) class NormalRowsError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'NormalRowsError', cause, True) class NoSupportedFeaturesError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'NoSupportedFeaturesError', cause, True) class ParameterMissingError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message=message, status_code=HTTPStatus.INTERNAL_SERVER_ERROR, code='ParameterMissingError', cause=cause, disclose_cause=False) class ParquetResponseEmptyError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'ParquetResponseEmptyError', cause, False) class PreviousStepFormatError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'PreviousStepFormatError', cause, False) class PreviousStepStatusError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'PreviousStepStatusError', cause, False) class PolarsParquetReadError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'PolarsParquetReadError', cause, False) class PreviousStepStillProcessingError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'PreviousStepStillProcessingError', cause, False) class RetryableConfigNamesError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'RetryableConfigNamesError', cause, True) class RowsPostProcessingError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'RowsPostProcessingError', cause, False) class SplitsNamesError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'SplitsNamesError', cause, True) class SplitNamesFromStreamingError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 
'SplitNamesFromStreamingError', cause, True) class SplitNotFoundError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message=message, status_code=HTTPStatus.NOT_FOUND, code='SplitNotFoundError', cause=cause, disclose_cause=False) class SplitParquetSchemaMismatchError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message=message, status_code=HTTPStatus.UNPROCESSABLE_ENTITY, code='SplitParquetSchemaMismatchError', cause=cause, disclose_cause=False) class StatisticsComputationError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'ComputationError', cause, True) class StreamingRowsError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'StreamingRowsError', cause, True) class TooBigContentError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message=message, status_code=HTTPStatus.NOT_IMPLEMENTED, code='TooBigContentError', cause=cause, disclose_cause=False) class TooManyColumnsError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'TooManyColumnsError', cause, True) class TooLongColumnNameError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'TooLongColumnNameError', cause, True) class UnexpectedError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message=message, status_code=HTTPStatus.INTERNAL_SERVER_ERROR, code='UnexpectedError', cause=cause, disclose_cause=False) logging.error(message, exc_info=cause) class DatasetWithScriptNotSupportedError(CacheableError): def __init__(self, message: str='', cause: Optional[BaseException]=None): message = message or "The dataset viewer doesn't support this dataset because it runs arbitrary Python code. You can convert it to a Parquet data-only dataset by using the convert_to_parquet CLI from the datasets library. 
See: https://huggingface.co/docs/datasets/main/en/cli#convert-to-parquet" super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'DatasetWithScriptNotSupportedError', cause, True) class NotSupportedError(CacheableError): pass class NotSupportedDisabledRepositoryError(NotSupportedError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'NotSupportedDisabledRepositoryError', cause, False) class NotSupportedRepositoryNotFoundError(NotSupportedError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'NotSupportedRepositoryNotFoundError', cause, False) class NotSupportedDisabledViewerError(NotSupportedError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'NotSupportedDisabledViewerError', cause, False) class NotSupportedPrivateRepositoryError(NotSupportedError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'NotSupportedPrivateRepositoryError', cause, False) class NotSupportedTagNFAAError(NotSupportedError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'NotSupportedTagNFAAError', cause, False) class DatasetInBlockListError(NotSupportedError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'DatasetInBlockListError', cause, False) class DatasetWithTooComplexDataFilesPatternsError(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, 'DatasetWithTooComplexDataFilesPatternsError', cause, True) class PresidioScanNotEnabledForThisDataset(CacheableError): def __init__(self, message: str, cause: Optional[BaseException]=None): super().__init__(message, HTTPStatus.NOT_IMPLEMENTED, 'PresidioScanNotEnabledForThisDataset', cause, False) # File: dataset-viewer-main/libs/libcommon/src/libcommon/operations.py import logging from dataclasses import dataclass, field from typing import Optional, Union from huggingface_hub.hf_api import DatasetInfo, HfApi from huggingface_hub.utils import HfHubHTTPError, RepositoryNotFoundError, get_session, hf_raise_for_status, validate_hf_hub_args from libcommon.constants import TAG_NFAA_SYNONYMS from libcommon.dtos import Priority from libcommon.exceptions import NotSupportedDisabledRepositoryError, NotSupportedDisabledViewerError, NotSupportedError, NotSupportedPrivateRepositoryError, NotSupportedRepositoryNotFoundError, NotSupportedTagNFAAError from libcommon.orchestrator import TasksStatistics, backfill, get_revision, remove_dataset, set_revision, smart_set_revision from libcommon.state import IncoherentCacheError from libcommon.storage_client import StorageClient from libcommon.utils import raise_if_blocked @dataclass class EntityInfo: is_pro: Optional[bool] is_enterprise: Optional[bool] def __init__(self, **kwargs) -> None: self.is_pro = kwargs.pop('isPro', None) self.is_enterprise = kwargs.pop('isEnterprise', None) class CustomHfApi(HfApi): @validate_hf_hub_args def whoisthis(self, name: str, *, timeout: Optional[float]=None, token: Optional[Union[bool, str]]=None) -> EntityInfo: headers = self._build_hf_headers(token=token) path = f'{self.endpoint}/api/whoisthis' params = {'name': name} r = get_session().get(path, 
headers=headers, timeout=timeout, params=params) hf_raise_for_status(r) data = r.json() return EntityInfo(**data) def get_dataset_info(dataset: str, hf_endpoint: str, hf_token: Optional[str]=None, hf_timeout_seconds: Optional[float]=None) -> DatasetInfo: return HfApi(endpoint=hf_endpoint).dataset_info(repo_id=dataset, token=hf_token, timeout=hf_timeout_seconds, files_metadata=False) def get_entity_info(author: str, hf_endpoint: str, hf_token: Optional[str]=None, hf_timeout_seconds: Optional[float]=None) -> EntityInfo: return CustomHfApi(endpoint=hf_endpoint).whoisthis(name=author, token=hf_token, timeout=hf_timeout_seconds) def get_latest_dataset_revision_if_supported_or_raise(dataset: str, hf_endpoint: str, hf_token: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, blocked_datasets: Optional[list[str]]=None) -> str: try: dataset_info = get_dataset_info(dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds) except RepositoryNotFoundError as e: raise NotSupportedRepositoryNotFoundError(f'Repository {dataset} is not found.', e) from e except HfHubHTTPError as e: response = e.response if response.headers.get('X-Error-Message') == 'Access to this resource is disabled.': raise NotSupportedDisabledRepositoryError(f'Repository {dataset} is disabled.', e) from e raise revision = dataset_info.sha if not revision: raise ValueError(f'Cannot get the git revision of dataset {dataset}.') if dataset_info.disabled: raise NotSupportedDisabledRepositoryError(f'Not supported: dataset repository {dataset} is disabled.') if dataset_info.private: author = dataset_info.author if not author: raise ValueError(f'Cannot get the author of dataset {dataset}.') entity_info = get_entity_info(author=author, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds) if not entity_info.is_pro and (not entity_info.is_enterprise): raise NotSupportedPrivateRepositoryError(f'Not supported: dataset repository {dataset} is private. 
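# --- Example (illustrative sketch, not part of the repository) ---------------
# CustomHfApi.whoisthis above queries the Hub's /api/whoisthis endpoint and maps
# the camelCase JSON payload (isPro, isEnterprise) onto EntityInfo. A minimal
# sketch of the same call with plain `requests` is shown below; fetch_entity_info
# is a hypothetical helper name, and the token/endpoint values are assumptions.
from dataclasses import dataclass
from typing import Optional

import requests

@dataclass
class MiniEntityInfo:
    is_pro: Optional[bool] = None
    is_enterprise: Optional[bool] = None

def fetch_entity_info(name: str, hf_endpoint: str = "https://huggingface.co",
                      token: Optional[str] = None, timeout: float = 10.0) -> MiniEntityInfo:
    headers = {"Authorization": f"Bearer {token}"} if token else {}
    response = requests.get(f"{hf_endpoint}/api/whoisthis", params={"name": name},
                            headers=headers, timeout=timeout)
    response.raise_for_status()
    data = response.json()
    # map the Hub's camelCase fields to snake_case attributes, as EntityInfo does
    return MiniEntityInfo(is_pro=data.get("isPro"), is_enterprise=data.get("isEnterprise"))

# Usage (requires network access and, for private entity details, a valid token):
# info = fetch_entity_info("some-organization", token="hf_xxx")
# print(info.is_pro, info.is_enterprise)
# -----------------------------------------------------------------------------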
Private datasets are only supported for PRO users and Enterprise Hub organizations.') elif dataset_info.tags and any((tag in TAG_NFAA_SYNONYMS for tag in dataset_info.tags)): raise NotSupportedTagNFAAError('Not supported: dataset viewer is disabled.') if dataset_info.cardData and (not dataset_info.cardData.get('viewer', True)): raise NotSupportedDisabledViewerError(f'Not supported: dataset viewer is disabled in {dataset} configuration.') if blocked_datasets: raise_if_blocked(dataset=dataset, blocked_datasets=blocked_datasets) return str(revision) def get_current_revision(dataset: str) -> Optional[str]: logging.debug(f"get current revision for dataset='{dataset}'") return get_revision(dataset=dataset) @dataclass class OperationsStatistics: num_backfilled_datasets: int = 0 num_deleted_datasets: int = 0 num_untouched_datasets: int = 0 tasks: TasksStatistics = field(default_factory=TasksStatistics) def add(self, other: 'OperationsStatistics') -> None: self.num_backfilled_datasets += other.num_backfilled_datasets self.num_deleted_datasets += other.num_deleted_datasets self.num_untouched_datasets += other.num_untouched_datasets self.tasks.add(other.tasks) def delete_dataset(dataset: str, storage_clients: Optional[list[StorageClient]]=None) -> OperationsStatistics: logging.debug(f"delete cache for dataset='{dataset}'") return OperationsStatistics(num_deleted_datasets=1, tasks=remove_dataset(dataset=dataset, storage_clients=storage_clients)) def update_dataset(dataset: str, hf_endpoint: str, blocked_datasets: Optional[list[str]]=None, hf_token: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, priority: Priority=Priority.LOW, storage_clients: Optional[list[StorageClient]]=None) -> None: try: revision = get_latest_dataset_revision_if_supported_or_raise(dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds, blocked_datasets=blocked_datasets) except NotSupportedError as e: logging.warning(f"Dataset {dataset} is not supported ({type(e)}). Let's delete the dataset.") delete_dataset(dataset=dataset, storage_clients=storage_clients) raise set_revision(dataset=dataset, revision=revision, priority=priority) def smart_update_dataset(dataset: str, revision: str, hf_endpoint: str, old_revision: str, blocked_datasets: Optional[list[str]]=None, hf_token: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, storage_clients: Optional[list[StorageClient]]=None) -> None: try: get_latest_dataset_revision_if_supported_or_raise(dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds, blocked_datasets=blocked_datasets) except NotSupportedError as e: logging.warning(f"Dataset {dataset} is not supported ({type(e)}). 
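# --- Example (illustrative sketch, not part of the repository) ---------------
# get_latest_dataset_revision_if_supported_or_raise above walks through a series
# of eligibility checks before returning the git revision. The standalone sketch
# below reproduces that decision flow with plain values instead of Hub API
# objects; the function and parameter names are hypothetical, and the default
# NFAA synonym set is an assumption (the real list lives in TAG_NFAA_SYNONYMS).
from typing import Optional

class MiniNotSupportedError(Exception):
    pass

def check_dataset_supported(revision: Optional[str], disabled: bool, private: bool,
                            author_is_pro: bool, author_is_enterprise: bool,
                            tags: list[str], viewer_enabled: bool,
                            nfaa_tags: frozenset[str] = frozenset({"not-for-all-audiences"})) -> str:
    if not revision:
        raise ValueError("cannot get the git revision of the dataset")
    if disabled:
        raise MiniNotSupportedError("the dataset repository is disabled")
    if private and not (author_is_pro or author_is_enterprise):
        raise MiniNotSupportedError("private datasets need a PRO user or Enterprise Hub org")
    if any(tag in nfaa_tags for tag in tags):
        raise MiniNotSupportedError("the dataset viewer is disabled for NFAA-tagged datasets")
    if not viewer_enabled:
        raise MiniNotSupportedError("the dataset viewer is disabled in the dataset card")
    return revision

# Usage: a public dataset with the viewer enabled passes all the checks.
assert check_dataset_supported("abc1234", disabled=False, private=False,
                               author_is_pro=False, author_is_enterprise=False,
                               tags=["language:en"], viewer_enabled=True) == "abc1234"
# -----------------------------------------------------------------------------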
Let's delete the dataset.") delete_dataset(dataset=dataset, storage_clients=storage_clients) raise smart_set_revision(dataset=dataset, revision=revision, old_revision=old_revision, storage_clients=storage_clients, hf_endpoint=hf_endpoint, hf_token=hf_token) def backfill_dataset(dataset: str, hf_endpoint: str, blocked_datasets: Optional[list[str]]=None, hf_token: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, priority: Priority=Priority.LOW, storage_clients: Optional[list[StorageClient]]=None) -> OperationsStatistics: try: revision = get_latest_dataset_revision_if_supported_or_raise(dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds, blocked_datasets=blocked_datasets) except NotSupportedError as e: logging.warning(f"Dataset {dataset} is not supported ({type(e)}). Let's delete the dataset.") return delete_dataset(dataset=dataset, storage_clients=storage_clients) try: tasks_statistics = backfill(dataset=dataset, revision=revision, priority=priority) except IncoherentCacheError: logging.warning(f"Dataset {dataset} has incoherent entries in the cache. Let's first delete the dataset, then backfill again.") delete_dataset(dataset=dataset, storage_clients=storage_clients) tasks_statistics = backfill(dataset=dataset, revision=revision, priority=priority) has_tasks = tasks_statistics.has_tasks() return OperationsStatistics(num_backfilled_datasets=1 if has_tasks else 0, num_untouched_datasets=1 if not has_tasks else 0, tasks=tasks_statistics) # File: dataset-viewer-main/libs/libcommon/src/libcommon/orchestrator.py import logging import time from abc import ABC, abstractmethod from dataclasses import dataclass, field from functools import lru_cache from http import HTTPStatus from typing import Optional, Union import pandas as pd from huggingface_hub import DatasetCard, HfFileSystem from huggingface_hub.utils import build_hf_headers, get_session from libcommon.constants import CONFIG_INFO_KIND, CONFIG_SPLIT_NAMES_KIND, DATASET_CONFIG_NAMES_KIND, DEFAULT_DIFFICULTY_MAX, DIFFICULTY_BONUS_BY_FAILED_RUNS, YAML_FIELDS_TO_CHECK from libcommon.dtos import JobInfo, JobResult, Priority from libcommon.processing_graph import ProcessingGraph, ProcessingStep, ProcessingStepDoesNotExist, processing_graph from libcommon.prometheus import StepProfiler from libcommon.queue.jobs import Queue from libcommon.simple_cache import CachedArtifactNotFoundError, delete_dataset_responses, fetch_names, get_cache_entries_df, get_response, get_response_metadata, update_revision_of_dataset_responses, upsert_response_params from libcommon.state import ArtifactState, DatasetState, FirstStepsDatasetState from libcommon.storage_client import StorageClient @dataclass class CacheStatus: cache_has_different_git_revision: dict[str, ArtifactState] = field(default_factory=dict) cache_is_outdated_by_parent: dict[str, ArtifactState] = field(default_factory=dict) cache_is_empty: dict[str, ArtifactState] = field(default_factory=dict) cache_is_error_to_retry: dict[str, ArtifactState] = field(default_factory=dict) cache_is_job_runner_obsolete: dict[str, ArtifactState] = field(default_factory=dict) up_to_date: dict[str, ArtifactState] = field(default_factory=dict) def as_response(self) -> dict[str, list[str]]: return {'cache_has_different_git_revision': sorted(self.cache_has_different_git_revision.keys()), 'cache_is_outdated_by_parent': sorted(self.cache_is_outdated_by_parent.keys()), 'cache_is_empty': sorted(self.cache_is_empty.keys()), 'cache_is_error_to_retry': 
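# --- Example (illustrative sketch, not part of the repository) ---------------
# backfill_dataset above follows a recovery pattern: if the dataset is no longer
# supported its cache is deleted, and if the cache is found incoherent it is
# deleted and the backfill is attempted once more. A condensed sketch of that
# control flow, with hypothetical callables standing in for the real
# delete_dataset/backfill functions:
from typing import Callable

class MiniNotSupported(Exception):
    pass

class MiniIncoherentCache(Exception):
    pass

def backfill_with_recovery(get_revision: Callable[[], str],
                           delete: Callable[[], None],
                           backfill: Callable[[str], int]) -> int:
    # returns the number of created jobs (0 when the dataset was deleted instead)
    try:
        revision = get_revision()
    except MiniNotSupported:
        delete()
        return 0
    try:
        return backfill(revision)
    except MiniIncoherentCache:
        # wipe the inconsistent cache entries, then rebuild from scratch
        delete()
        return backfill(revision)

# Usage with trivial stand-ins:
created = backfill_with_recovery(lambda: "abc1234", lambda: None, lambda rev: 3)
assert created == 3
# -----------------------------------------------------------------------------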
sorted(self.cache_is_error_to_retry.keys()), 'cache_is_job_runner_obsolete': sorted(self.cache_is_job_runner_obsolete.keys()), 'up_to_date': sorted(self.up_to_date.keys())} @dataclass class QueueStatus: in_process: set[str] = field(default_factory=set) def as_response(self) -> dict[str, list[str]]: return {'in_process': sorted(self.in_process)} @dataclass class TasksStatistics: num_created_jobs: int = 0 num_deleted_waiting_jobs: int = 0 num_deleted_cache_entries: int = 0 num_updated_cache_entries: int = 0 num_deleted_storage_directories: int = 0 num_updated_storage_directories: int = 0 def add(self, other: 'TasksStatistics') -> None: self.num_created_jobs += other.num_created_jobs self.num_deleted_waiting_jobs += other.num_deleted_waiting_jobs self.num_deleted_cache_entries += other.num_deleted_cache_entries self.num_updated_cache_entries += other.num_updated_cache_entries self.num_deleted_storage_directories += other.num_deleted_storage_directories self.num_updated_storage_directories += other.num_updated_storage_directories def has_tasks(self) -> bool: return any([self.num_created_jobs > 0, self.num_deleted_waiting_jobs > 0, self.num_deleted_cache_entries > 0, self.num_updated_cache_entries > 0, self.num_deleted_storage_directories > 0, self.num_updated_storage_directories > 0]) def get_log(self) -> str: return f'{self.num_created_jobs} created jobs, {self.num_deleted_waiting_jobs} deleted waiting jobs, {self.num_deleted_cache_entries} deleted cache entries, {self.num_updated_cache_entries} updated cache entries, {self.num_deleted_storage_directories} deleted storage directories, {self.num_updated_storage_directories} updated storage directories' @dataclass class Task(ABC): id: str = field(init=False) long_id: str = field(init=False) @abstractmethod def run(self) -> TasksStatistics: pass @dataclass class CreateJobsTask(Task): job_infos: list[JobInfo] = field(default_factory=list) def __post_init__(self) -> None: self.id = f'CreateJobs,{len(self.job_infos)}' types = [job_info['type'] for job_info in self.job_infos] self.long_id = f'CreateJobs,{types}' def run(self) -> TasksStatistics: with StepProfiler(method='CreateJobsTask.run', step='all'): num_created_jobs = Queue().create_jobs(job_infos=self.job_infos) if num_created_jobs != len(self.job_infos): raise ValueError(f'Something went wrong when creating jobs: {len(self.job_infos)} jobs were supposed to be created, but {num_created_jobs} were created.') return TasksStatistics(num_created_jobs=num_created_jobs) @dataclass class DeleteWaitingJobsTask(Task): jobs_df: pd.DataFrame def __post_init__(self) -> None: self.id = f'DeleteWaitingJobs,{len(self.jobs_df)}' types = [row['type'] for (_, row) in self.jobs_df.iterrows()] self.long_id = f'DeleteWaitingJobs,{types}' def run(self) -> TasksStatistics: with StepProfiler(method='DeleteWaitingJobsTask.run', step='all'): num_deleted_waiting_jobs = Queue().delete_waiting_jobs_by_job_id(job_ids=self.jobs_df['job_id'].tolist()) logging.debug(f'{num_deleted_waiting_jobs} waiting jobs were deleted.') return TasksStatistics(num_deleted_waiting_jobs=num_deleted_waiting_jobs) @dataclass class DeleteDatasetWaitingJobsTask(Task): dataset: str def __post_init__(self) -> None: self.id = f'DeleteDatasetJobs,{self.dataset}' self.long_id = self.id def run(self) -> TasksStatistics: with StepProfiler(method='DeleteDatasetWaitingJobsTask.run', step='all'): return TasksStatistics(num_deleted_waiting_jobs=Queue().delete_dataset_waiting_jobs(dataset=self.dataset)) @dataclass class DeleteDatasetCacheEntriesTask(Task): 
dataset: str def __post_init__(self) -> None: self.id = f'DeleteDatasetCacheEntries,{self.dataset}' self.long_id = self.id def run(self) -> TasksStatistics: with StepProfiler(method='DeleteDatasetCacheEntriesTask.run', step='all'): return TasksStatistics(num_deleted_cache_entries=delete_dataset_responses(dataset=self.dataset)) @dataclass class UpdateRevisionOfDatasetCacheEntriesTask(Task): dataset: str old_revision: str new_revision: str def __post_init__(self) -> None: self.id = 'UpdateRevisionOfDatasetCacheEntriesTask,1' self.long_id = self.id def run(self) -> TasksStatistics: with StepProfiler(method='UpdateRevisionOfDatasetCacheEntriesTask.run', step='all'): return TasksStatistics(num_updated_cache_entries=update_revision_of_dataset_responses(dataset=self.dataset, old_revision=self.old_revision, new_revision=self.new_revision)) @dataclass class DeleteDatasetStorageTask(Task): dataset: str storage_client: StorageClient def __post_init__(self) -> None: self.id = f'DeleteDatasetStorageTask,{self.dataset},{self.storage_client.protocol}://{self.storage_client.storage_root}' self.long_id = self.id def run(self) -> TasksStatistics: with StepProfiler(method='DeleteDatasetStorageTask.run', step='all'): return TasksStatistics(num_deleted_storage_directories=self.storage_client.delete_dataset_directory(self.dataset)) @dataclass class UpdateRevisionOfDatasetStorageTask(Task): dataset: str old_revision: str new_revision: str storage_client: StorageClient def __post_init__(self) -> None: self.id = f'UpdateRevisionOfDatasetStorageTask,{self.dataset},{self.storage_client.protocol}://{self.storage_client.storage_root}' self.long_id = self.id def run(self) -> TasksStatistics: with StepProfiler(method='UpdateRevisionOfDatasetStorageTask.run', step='all'): return TasksStatistics(num_updated_storage_directories=self.storage_client.update_revision_of_dataset_revision_directory(self.dataset, self.old_revision, self.new_revision)) SupportedTask = Union[CreateJobsTask, DeleteWaitingJobsTask, DeleteDatasetWaitingJobsTask, DeleteDatasetCacheEntriesTask, DeleteDatasetStorageTask, UpdateRevisionOfDatasetCacheEntriesTask, UpdateRevisionOfDatasetStorageTask] @dataclass class Plan: tasks: list[SupportedTask] = field(init=False) def __post_init__(self) -> None: self.tasks = [] def add_task(self, task: SupportedTask) -> None: self.tasks.append(task) def run(self) -> TasksStatistics: statistics = TasksStatistics() for (idx, task) in enumerate(self.tasks): logging.debug(f'Running task [{idx}/{len(self.tasks)}]: {task.long_id}') statistics.add(task.run()) return statistics def as_response(self) -> list[str]: return sorted((task.id for task in self.tasks)) def get_num_bytes_from_config_infos(dataset: str, config: str, split: Optional[str]=None) -> Optional[int]: try: resp = get_response(kind=CONFIG_INFO_KIND, dataset=dataset, config=config) except CachedArtifactNotFoundError: return None if 'dataset_info' in resp['content'] and isinstance(resp['content']['dataset_info'], dict): dataset_info = resp['content']['dataset_info'] if split is None: num_bytes = dataset_info.get('dataset_size') if isinstance(num_bytes, int): return num_bytes elif 'splits' in dataset_info and isinstance(dataset_info['splits'], dict): split_infos = dataset_info['splits'] if split in split_infos and isinstance(split_infos[split], dict): split_info = split_infos[split] num_bytes = split_info.get('num_bytes') if isinstance(num_bytes, int): return num_bytes return None @dataclass class AfterJobPlan(Plan): job_info: JobInfo processing_graph: 
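# --- Example (illustrative sketch, not part of the repository) ---------------
# The orchestrator above is built around small Task objects that each return a
# TasksStatistics, and a Plan that queues tasks, runs them in order and sums
# their statistics. A compact standalone sketch of that pattern (class names are
# hypothetical):
from abc import ABC, abstractmethod
from dataclasses import dataclass, field

@dataclass
class MiniStats:
    num_created_jobs: int = 0
    num_deleted_jobs: int = 0

    def add(self, other: "MiniStats") -> None:
        self.num_created_jobs += other.num_created_jobs
        self.num_deleted_jobs += other.num_deleted_jobs

class MiniTask(ABC):
    id: str

    @abstractmethod
    def run(self) -> MiniStats: ...

@dataclass
class CreateJobs(MiniTask):
    count: int

    def __post_init__(self) -> None:
        self.id = f"CreateJobs,{self.count}"

    def run(self) -> MiniStats:
        return MiniStats(num_created_jobs=self.count)

@dataclass
class MiniPlan:
    tasks: list[MiniTask] = field(default_factory=list)

    def add_task(self, task: MiniTask) -> None:
        self.tasks.append(task)

    def run(self) -> MiniStats:
        total = MiniStats()
        for task in self.tasks:
            total.add(task.run())
        return total

    def as_response(self) -> list[str]:
        return sorted(task.id for task in self.tasks)

# Usage:
plan = MiniPlan()
plan.add_task(CreateJobs(2))
plan.add_task(CreateJobs(3))
assert plan.run().num_created_jobs == 5 and plan.as_response() == ["CreateJobs,2", "CreateJobs,3"]
# -----------------------------------------------------------------------------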
ProcessingGraph failed_runs: int dataset: str = field(init=False) config: Optional[str] = field(init=False) split: Optional[str] = field(init=False) revision: str = field(init=False) priority: Priority = field(init=False) def __post_init__(self) -> None: super().__post_init__() self.dataset = self.job_info['params']['dataset'] self.revision = self.job_info['params']['revision'] self.priority = self.job_info['priority'] config = self.job_info['params']['config'] split = self.job_info['params']['split'] job_type = self.job_info['type'] try: processing_step = self.processing_graph.get_processing_step_by_job_type(job_type) next_processing_steps = self.processing_graph.get_children(processing_step.name) except ProcessingStepDoesNotExist as e: raise ValueError(f'Processing step with job type: {job_type} does not exist') from e if len(next_processing_steps) == 0: return if config is not None: self.num_bytes = get_num_bytes_from_config_infos(dataset=self.dataset, config=config, split=split) else: self.num_bytes = None self.pending_jobs_df = Queue().get_pending_jobs_df(dataset=self.dataset, job_types=[next_processing_step.job_type for next_processing_step in next_processing_steps]) self.job_infos_to_create: list[JobInfo] = [] config_names: Optional[list[str]] = None split_names: Optional[list[str]] = None for next_processing_step in next_processing_steps: if processing_step.input_type == next_processing_step.input_type: self.update(next_processing_step, config, split) elif processing_step.input_type in ['config', 'split'] and next_processing_step.input_type == 'dataset': self.update(next_processing_step, None, None) elif processing_step.input_type == 'split' and next_processing_step.input_type == 'config': self.update(next_processing_step, config, None) elif processing_step.input_type == 'dataset' and next_processing_step.input_type == 'config': if config_names is None: config_names = fetch_names(dataset=self.dataset, config=None, cache_kind=DATASET_CONFIG_NAMES_KIND, names_field='config_names', name_field='config') for config_name in config_names: self.update(next_processing_step, config_name, None) elif processing_step.input_type == 'config' and next_processing_step.input_type == 'split': if split_names is None: split_names = fetch_names(dataset=self.dataset, config=config, cache_kind=CONFIG_SPLIT_NAMES_KIND, names_field='splits', name_field='split') for split_name in split_names: self.update(next_processing_step, config, split_name) else: raise NotImplementedError(f'Unsupported input types: {processing_step.input_type} -> {next_processing_step.input_type}') if not self.pending_jobs_df.empty: self.add_task(DeleteWaitingJobsTask(jobs_df=self.pending_jobs_df)) if self.job_infos_to_create: self.add_task(CreateJobsTask(job_infos=self.job_infos_to_create)) def update(self, next_processing_step: ProcessingStep, config: Optional[str], split: Optional[str]) -> None: config_mask = self.pending_jobs_df['config'].isnull() if config is None else self.pending_jobs_df['config'] == config split_mask = self.pending_jobs_df['split'].isnull() if split is None else self.pending_jobs_df['split'] == split unrelated_jobs_mask = (self.pending_jobs_df['type'] == next_processing_step.job_type) & ((self.pending_jobs_df['dataset'] != self.dataset) | ~config_mask | ~split_mask) self.pending_jobs_df = self.pending_jobs_df[~unrelated_jobs_mask] jobs_mask = (self.pending_jobs_df['type'] == next_processing_step.job_type) & (self.pending_jobs_df['dataset'] == self.dataset) & config_mask & split_mask ok_jobs_mask = jobs_mask & 
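# --- Example (illustrative sketch, not part of the repository) ---------------
# AfterJobPlan.__post_init__ above fans a finished job out to its children: the
# (config, split) parameters of each child job depend on how the child's input
# type relates to the parent's (dataset -> config -> split). The sketch below
# reproduces just that fan-out rule; the function name and signature are
# hypothetical.
from typing import Iterator, Optional

def child_job_params(parent_type: str, child_type: str,
                     config: Optional[str], split: Optional[str],
                     config_names: list[str], split_names: list[str]
                     ) -> Iterator[tuple[Optional[str], Optional[str]]]:
    if parent_type == child_type:
        yield (config, split)                       # same granularity: keep the params
    elif child_type == "dataset":
        yield (None, None)                          # config/split step feeding a dataset step
    elif parent_type == "split" and child_type == "config":
        yield (config, None)
    elif parent_type == "dataset" and child_type == "config":
        yield from ((name, None) for name in config_names)   # one job per config
    elif parent_type == "config" and child_type == "split":
        yield from ((config, name) for name in split_names)  # one job per split
    else:
        raise NotImplementedError(f"unsupported transition: {parent_type} -> {child_type}")

# Usage: a finished dataset-level step triggering a config-level child.
assert list(child_job_params("dataset", "config", None, None, ["default", "extra"], [])) == [
    ("default", None), ("extra", None)
]
# -----------------------------------------------------------------------------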
(self.pending_jobs_df['revision'] == self.revision) if ok_jobs_mask.any(): self.pending_jobs_df.drop(ok_jobs_mask.idxmax(), inplace=True) else: difficulty = next_processing_step.difficulty if self.num_bytes is not None and self.num_bytes >= self.processing_graph.min_bytes_for_bonus_difficulty: difficulty += next_processing_step.bonus_difficulty_if_dataset_is_big difficulty = min(DEFAULT_DIFFICULTY_MAX, difficulty) self.job_infos_to_create.append({'job_id': 'not used', 'type': next_processing_step.job_type, 'params': {'dataset': self.dataset, 'config': config, 'split': split, 'revision': self.revision}, 'priority': self.priority, 'difficulty': difficulty, 'started_at': None}) @dataclass class DatasetBackfillPlan(Plan): dataset: str revision: str priority: Priority = Priority.LOW only_first_processing_steps: bool = False processing_graph: ProcessingGraph = field(default=processing_graph) pending_jobs_df: pd.DataFrame = field(init=False) cache_entries_df: pd.DataFrame = field(init=False) dataset_state: DatasetState = field(init=False) cache_status: CacheStatus = field(init=False) def __post_init__(self) -> None: super().__post_init__() with StepProfiler(method='DatasetBackfillPlan.__post_init__', step='all'): with StepProfiler(method='DatasetBackfillPlan.__post_init__', step='get_pending_jobs_df'): job_types = [processing_step.job_type for processing_step in self.processing_graph.get_first_processing_steps()] if self.only_first_processing_steps else None self.pending_jobs_df = Queue().get_pending_jobs_df(dataset=self.dataset, job_types=job_types) with StepProfiler(method='DatasetBackfillPlan.__post_init__', step='get_cache_entries_df'): cache_kinds = [processing_step.cache_kind for processing_step in self.processing_graph.get_first_processing_steps()] if self.only_first_processing_steps else None self.cache_entries_df = get_cache_entries_df(dataset=self.dataset, cache_kinds=cache_kinds) with StepProfiler(method='DatasetBackfillPlan.__post_init__', step='get_dataset_state'): self.dataset_state = FirstStepsDatasetState(dataset=self.dataset, processing_graph=self.processing_graph, revision=self.revision, pending_jobs_df=self.pending_jobs_df, cache_entries_df=self.cache_entries_df) if self.only_first_processing_steps else DatasetState(dataset=self.dataset, processing_graph=self.processing_graph, revision=self.revision, pending_jobs_df=self.pending_jobs_df, cache_entries_df=self.cache_entries_df) with StepProfiler(method='DatasetBackfillPlan.__post_init__', step='_get_cache_status'): self.cache_status = self._get_cache_status() with StepProfiler(method='DatasetBackfillPlan.__post_init__', step='_create_plan'): self._create_plan() def _get_artifact_states_for_step(self, processing_step: ProcessingStep, config: Optional[str]=None, split: Optional[str]=None) -> list[ArtifactState]: if processing_step.input_type == 'dataset': artifact_states = [self.dataset_state.artifact_state_by_step[processing_step.name]] elif processing_step.input_type == 'config': if config is None: artifact_states = [config_state.artifact_state_by_step[processing_step.name] for config_state in self.dataset_state.config_states] else: artifact_states = [config_state.artifact_state_by_step[processing_step.name] for config_state in self.dataset_state.config_states if config_state.config == config] elif processing_step.input_type == 'split': if config is None: artifact_states = [split_state.artifact_state_by_step[processing_step.name] for config_state in self.dataset_state.config_states for split_state in config_state.split_states] 
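# --- Example (illustrative sketch, not part of the repository) ---------------
# AfterJobPlan.update above reconciles already-pending jobs with the jobs it
# wants: boolean masks over a pandas DataFrame select the matching waiting job
# (same type, dataset, config, split and revision); if one exists it is kept
# (removed from the "to delete" set), otherwise a new job must be created. A
# small self-contained illustration of that mask-based bookkeeping (all values
# below are made up):
import pandas as pd

pending = pd.DataFrame({
    "job_id": ["a", "b", "c"],
    "type": ["split-first-rows", "split-first-rows", "config-parquet"],
    "dataset": ["user/ds", "user/ds", "user/ds"],
    "config": ["default", "default", "default"],
    "split": ["train", "test", None],
    "revision": ["rev2", "rev1", "rev2"],
})

wanted = {"type": "split-first-rows", "config": "default", "split": "train", "revision": "rev2"}

config_mask = pending["config"].isnull() if wanted["config"] is None else pending["config"] == wanted["config"]
split_mask = pending["split"].isnull() if wanted["split"] is None else pending["split"] == wanted["split"]
jobs_mask = (pending["type"] == wanted["type"]) & config_mask & split_mask
ok_jobs_mask = jobs_mask & (pending["revision"] == wanted["revision"])

if ok_jobs_mask.any():
    # a matching waiting job already exists: keep it (drop it from the delete set)
    pending = pending.drop(ok_jobs_mask.idxmax())
else:
    print("no matching waiting job: schedule a new one")

assert list(pending["job_id"]) == ["b", "c"]
# -----------------------------------------------------------------------------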
elif split is None: artifact_states = [split_state.artifact_state_by_step[processing_step.name] for config_state in self.dataset_state.config_states if config_state.config == config for split_state in config_state.split_states] else: artifact_states = [split_state.artifact_state_by_step[processing_step.name] for config_state in self.dataset_state.config_states if config_state.config == config for split_state in config_state.split_states if split_state.split == split] else: raise ValueError(f'Invalid input type: {processing_step.input_type}') artifact_states_ids = {artifact_state.id for artifact_state in artifact_states} if len(artifact_states_ids) != len(artifact_states): raise ValueError(f'Duplicate artifact states for processing_step {processing_step}') return artifact_states def _get_cache_status(self) -> CacheStatus: cache_status = CacheStatus() processing_steps = self.processing_graph.get_first_processing_steps() if self.only_first_processing_steps else self.processing_graph.get_topologically_ordered_processing_steps() for processing_step in processing_steps: artifact_states = self._get_artifact_states_for_step(processing_step) for artifact_state in artifact_states: if any((artifact_state.cache_state.is_older_than(parent_artifact_state.cache_state) for parent_step in self.processing_graph.get_parents(processing_step.name) for parent_artifact_state in self._get_artifact_states_for_step(processing_step=parent_step, config=artifact_state.config, split=artifact_state.split))): cache_status.cache_is_outdated_by_parent[artifact_state.id] = artifact_state continue if artifact_state.cache_state.is_empty(): cache_status.cache_is_empty[artifact_state.id] = artifact_state continue if artifact_state.cache_state.is_error_to_retry(): cache_status.cache_is_error_to_retry[artifact_state.id] = artifact_state continue if artifact_state.cache_state.is_job_runner_obsolete(): cache_status.cache_is_job_runner_obsolete[artifact_state.id] = artifact_state continue if artifact_state.cache_state.is_git_revision_different_from(self.revision): cache_status.cache_has_different_git_revision[artifact_state.id] = artifact_state continue cache_status.up_to_date[artifact_state.id] = artifact_state return cache_status def get_queue_status(self) -> QueueStatus: processing_steps = self.processing_graph.get_first_processing_steps() if self.only_first_processing_steps else self.processing_graph.get_topologically_ordered_processing_steps() return QueueStatus(in_process={artifact_state.id for processing_step in processing_steps for artifact_state in self._get_artifact_states_for_step(processing_step) if artifact_state.job_state.is_in_process}) def _create_plan(self) -> None: pending_jobs_to_delete_df = self.pending_jobs_df.copy() job_infos_to_create: list[JobInfo] = [] artifact_states = list(self.cache_status.cache_is_empty.values()) + list(self.cache_status.cache_is_error_to_retry.values()) + list(self.cache_status.cache_is_outdated_by_parent.values()) + list(self.cache_status.cache_is_job_runner_obsolete.values()) + list(self.cache_status.cache_has_different_git_revision.values()) @lru_cache def is_big(config: str) -> bool: num_bytes = get_num_bytes_from_config_infos(dataset=self.dataset, config=config) if num_bytes is None: return False else: return num_bytes > self.processing_graph.min_bytes_for_bonus_difficulty for artifact_state in artifact_states: valid_pending_jobs_df = artifact_state.job_state.valid_pending_jobs_df if valid_pending_jobs_df.empty: difficulty = artifact_state.processing_step.difficulty if 
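# --- Example (illustrative sketch, not part of the repository) ---------------
# DatasetBackfillPlan._get_cache_status above sorts every artifact into exactly
# one bucket, checking conditions in priority order (outdated by a parent, empty,
# retryable error, obsolete job runner, different git revision, else up to date).
# A condensed sketch of that first-match classification over plain dictionaries
# (the field names are hypothetical):
def classify_artifact(entry: dict, parents_updated_at: list[float], revision: str) -> str:
    if any(entry["updated_at"] < parent for parent in parents_updated_at):
        return "cache_is_outdated_by_parent"
    if entry.get("empty", False):
        return "cache_is_empty"
    if entry.get("error_to_retry", False):
        return "cache_is_error_to_retry"
    if entry.get("job_runner_obsolete", False):
        return "cache_is_job_runner_obsolete"
    if entry["git_revision"] != revision:
        return "cache_has_different_git_revision"
    return "up_to_date"

# Usage: an entry computed before its parent was refreshed is flagged first,
# even if it also points at an old git revision.
entry = {"updated_at": 1.0, "git_revision": "old", "empty": False}
assert classify_artifact(entry, parents_updated_at=[2.0], revision="new") == "cache_is_outdated_by_parent"
# -----------------------------------------------------------------------------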
isinstance(artifact_state.config, str) and is_big(config=artifact_state.config): difficulty += artifact_state.processing_step.bonus_difficulty_if_dataset_is_big if artifact_state.cache_state.cache_entry_metadata is not None: failed_runs = artifact_state.cache_state.cache_entry_metadata['failed_runs'] else: failed_runs = 0 difficulty = min(DEFAULT_DIFFICULTY_MAX, difficulty + failed_runs * DIFFICULTY_BONUS_BY_FAILED_RUNS) job_infos_to_create.append({'job_id': 'not used', 'type': artifact_state.processing_step.job_type, 'params': {'dataset': self.dataset, 'revision': self.revision, 'config': artifact_state.config, 'split': artifact_state.split}, 'priority': self.priority, 'difficulty': difficulty, 'started_at': None}) else: pending_jobs_to_delete_df.drop(valid_pending_jobs_df.index, inplace=True) if not pending_jobs_to_delete_df.empty: self.add_task(DeleteWaitingJobsTask(jobs_df=pending_jobs_to_delete_df)) if job_infos_to_create: self.add_task(CreateJobsTask(job_infos=job_infos_to_create)) class SmartUpdateImpossibleBecauseCacheIsEmpty(Exception): pass class SmartUpdateImpossibleBecauseOfUpdatedFiles(Exception): pass class SmartUpdateImpossibleBecauseOfUpdatedYAMLField(Exception): pass class SmartUpdateImpossibleBecauseCachedRevisionIsNotParentOfNewRevision(Exception): pass class SmartUpdateImpossibleBecauseCacheHasMultipleRevisions(Exception): pass @dataclass class SmartDatasetUpdatePlan(Plan): dataset: str revision: str hf_endpoint: str old_revision: str processing_graph: ProcessingGraph = field(default=processing_graph) storage_clients: Optional[list[StorageClient]] = None hf_token: Optional[str] = None cached_revision: str = field(init=False) diff: str = field(init=False) files_impacted_by_commit: set[str] = field(init=False) updated_yaml_fields_in_dataset_card: list[str] = field(init=False) def __post_init__(self) -> None: super().__post_init__() cache_kinds = [processing_step.cache_kind for processing_step in self.processing_graph.get_first_processing_steps()] for retry in range(3): cache_entries_df = get_cache_entries_df(dataset=self.dataset, cache_kinds=cache_kinds) if len(cache_entries_df) == 0: raise SmartUpdateImpossibleBecauseCacheIsEmpty(f'Failed to smart update to {self.revision[:7]}') cached_git_revisions = cache_entries_df['dataset_git_revision'].unique() if len(cached_git_revisions) > 1: raise SmartUpdateImpossibleBecauseCacheHasMultipleRevisions(f'Expected only 1 revision in the cache but got {len(cached_git_revisions)}: ' + ', '.join(cached_git_revisions)) self.cached_revision = cache_entries_df.sort_values('updated_at').iloc[-1]['dataset_git_revision'] if self.cached_revision == self.revision: return elif self.cached_revision == self.old_revision: break logging.warning(f'[{retry + 1}/3] Retrying smart update of {self.dataset} in 1s (received {str(self.old_revision)[:7]}->{self.revision[:7]} but cache is {self.cached_revision[:7]})') time.sleep(1) else: logging.warning(f'Failed to smart update {self.dataset} to {self.revision[:7]} because the cached revision {self.cached_revision[:7]} is not its parent') raise SmartUpdateImpossibleBecauseCachedRevisionIsNotParentOfNewRevision(f'Failed to smart update {self.dataset} to {self.revision[:7]} because the cached revision {self.cached_revision[:7]} is not its parent') self.diff = self.get_diff() self.files_impacted_by_commit = self.get_impacted_files() if self.files_impacted_by_commit - {'README.md', '.gitattributes', '.gitignore'}: raise SmartUpdateImpossibleBecauseOfUpdatedFiles(', '.join(self.files_impacted_by_commit)[:1000]) 
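# --- Example (illustrative sketch, not part of the repository) ---------------
# The SmartDatasetUpdatePlan above only allows an in-place revision update when
# the commit touched "safe" files: its get_impacted_files helper (defined just
# below) collects the paths named on the `--- a/`, `+++ b/` and `rename from/to`
# lines of the commit's .diff. Below, the same parsing is applied to a hard-coded
# diff string (the helper name is hypothetical; the real code fetches the diff
# over HTTP with an authenticated request):
def impacted_files_from_diff(diff: str) -> set[str]:
    impacted = set()
    for line in diff.split("\n"):
        if line.startswith("--- a/") or line.startswith("+++ b/"):
            impacted.add(line.split("/", 1)[1])          # drop the "--- a" / "+++ b" prefix
        elif line.startswith("rename from ") or line.startswith("rename to "):
            impacted.add(line.split(" ", 2)[2])          # keep the full renamed path
    return impacted

diff = "\n".join([
    "diff --git a/README.md b/README.md",
    "--- a/README.md",
    "+++ b/README.md",
])
files = impacted_files_from_diff(diff)
# the commit only modified the dataset card, so a smart (in-place) update is allowed
assert not (files - {"README.md", ".gitattributes", ".gitignore"})
# -----------------------------------------------------------------------------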
self.updated_yaml_fields_in_dataset_card = self.get_updated_yaml_fields_in_dataset_card() for yaml_field in YAML_FIELDS_TO_CHECK: if yaml_field in self.updated_yaml_fields_in_dataset_card: raise SmartUpdateImpossibleBecauseOfUpdatedYAMLField(yaml_field) self.add_task(UpdateRevisionOfDatasetCacheEntriesTask(dataset=self.dataset, old_revision=self.old_revision, new_revision=self.revision)) if self.storage_clients: for storage_client in self.storage_clients: self.add_task(UpdateRevisionOfDatasetStorageTask(dataset=self.dataset, old_revision=self.old_revision, new_revision=self.revision, storage_client=storage_client)) def get_diff(self) -> str: headers = build_hf_headers(token=self.hf_token, library_name='dataset-viewer') resp = get_session().get(self.hf_endpoint + f'/datasets/{self.dataset}/commit/{self.revision}.diff', timeout=10, headers=headers) resp.raise_for_status() if not isinstance(resp.content, bytes): raise RuntimeError(f'failed reading /datasets/{self.dataset}/commit/{self.revision}.diff') return resp.content.decode('utf-8') def get_impacted_files(self) -> set[str]: return set((line.split(' ', 2)[2] if line.startswith('rename ') else line.split('/', 1)[1] for line in self.diff.split('\n') if line.startswith('--- a/') or line.startswith('+++ b/') or line.startswith('rename from ') or line.startswith('rename to '))) def get_updated_yaml_fields_in_dataset_card(self) -> list[str]: if 'README.md' not in self.files_impacted_by_commit: return [] fs = HfFileSystem(endpoint=self.hf_endpoint, token=self.hf_token) try: with fs.open(f'datasets/{self.dataset}/README.md', revision=self.revision, mode='r', newline='', encoding='utf-8') as f: dataset_card_data_dict = DatasetCard(f.read()).data.to_dict() except FileNotFoundError: dataset_card_data_dict = {} try: with fs.open(f'datasets/{self.dataset}/README.md', revision=self.old_revision, mode='r', newline='', encoding='utf-8') as f: old_dataset_card_data_dict = DatasetCard(f.read()).data.to_dict() except FileNotFoundError: old_dataset_card_data_dict = {} return [yaml_field for yaml_field in set(dataset_card_data_dict) | set(old_dataset_card_data_dict) if dataset_card_data_dict.get(yaml_field) != old_dataset_card_data_dict.get(yaml_field)] @dataclass class DatasetRemovalPlan(Plan): dataset: str storage_clients: Optional[list[StorageClient]] def __post_init__(self) -> None: super().__post_init__() self.add_task(DeleteDatasetWaitingJobsTask(dataset=self.dataset)) self.add_task(DeleteDatasetCacheEntriesTask(dataset=self.dataset)) if self.storage_clients: for storage_client in self.storage_clients: self.add_task(DeleteDatasetStorageTask(dataset=self.dataset, storage_client=storage_client)) def remove_dataset(dataset: str, storage_clients: Optional[list[StorageClient]]=None) -> TasksStatistics: plan = DatasetRemovalPlan(dataset=dataset, storage_clients=storage_clients) return plan.run() def set_revision(dataset: str, revision: str, priority: Priority, processing_graph: ProcessingGraph=processing_graph) -> TasksStatistics: logging.info(f'Analyzing {dataset}') plan = DatasetBackfillPlan(dataset=dataset, revision=revision, priority=priority, processing_graph=processing_graph, only_first_processing_steps=True) logging.info(f'Applying set_revision plan on {dataset}: plan={plan.as_response()}') return plan.run() def smart_set_revision(dataset: str, revision: str, hf_endpoint: str, old_revision: str, processing_graph: ProcessingGraph=processing_graph, storage_clients: Optional[list[StorageClient]]=None, hf_token: Optional[str]=None) -> TasksStatistics: 
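# --- Example (illustrative sketch, not part of the repository) ---------------
# get_updated_yaml_fields_in_dataset_card above diffs the YAML header of the
# dataset card between the old and the new revision, and the smart update is
# aborted when one of the watched fields (YAML_FIELDS_TO_CHECK) changed. The
# comparison itself, shown on two in-memory card strings (contents are made up):
from huggingface_hub import DatasetCard

old_readme = "---\nlicense: mit\nlanguage: [en]\n---\n# My dataset\n"
new_readme = "---\nlicense: mit\nlanguage: [en, fr]\n---\n# My dataset\n"

old_data = DatasetCard(old_readme).data.to_dict()
new_data = DatasetCard(new_readme).data.to_dict()

# a field counts as updated when its value differs between the two revisions
updated_fields = [key for key in set(old_data) | set(new_data)
                  if old_data.get(key) != new_data.get(key)]
assert updated_fields == ["language"]
# -----------------------------------------------------------------------------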
logging.info(f'Analyzing {dataset} in a smart way') plan = SmartDatasetUpdatePlan(dataset=dataset, revision=revision, old_revision=old_revision, processing_graph=processing_graph, storage_clients=storage_clients, hf_endpoint=hf_endpoint, hf_token=hf_token) logging.info(f'Applying smart_set_revision plan on {dataset}: plan={plan.as_response()}') return plan.run() def backfill(dataset: str, revision: str, priority: Priority, processing_graph: ProcessingGraph=processing_graph) -> TasksStatistics: logging.info(f'Analyzing {dataset}') plan = DatasetBackfillPlan(dataset=dataset, revision=revision, priority=priority, processing_graph=processing_graph, only_first_processing_steps=False) logging.info(f'Applying backfill plan on {dataset}: plan={plan.as_response()}') return plan.run() def finish_job(job_result: JobResult, processing_graph: ProcessingGraph=processing_graph) -> TasksStatistics: job_info = job_result['job_info'] if not Queue().is_job_started(job_id=job_info['job_id']): logging.debug("the job was deleted, don't update the cache") return TasksStatistics() if not job_result['output']: Queue().finish_job(job_id=job_info['job_id']) logging.debug("the job raised an exception, don't update the cache") return TasksStatistics() output = job_result['output'] params = job_info['params'] try: processing_step = processing_graph.get_processing_step_by_job_type(job_info['type']) except ProcessingStepDoesNotExist as e: raise ValueError(f"Processing step for job type {job_info['type']} does not exist") from e try: previous_response = get_response_metadata(kind=processing_step.cache_kind, dataset=params['dataset'], config=params['config'], split=params['split']) failed_runs = previous_response['failed_runs'] + 1 if output['http_status'] != HTTPStatus.OK and previous_response['dataset_git_revision'] == params['revision'] else 0 except CachedArtifactNotFoundError: failed_runs = 0 upsert_response_params(kind=processing_step.cache_kind, job_params=params, job_runner_version=job_result['job_runner_version'], content=output['content'], http_status=output['http_status'], error_code=output['error_code'], details=output['details'], progress=output['progress'], failed_runs=failed_runs, duration=job_result['duration']) logging.debug('the job output has been written to the cache.') job_priority = Queue().finish_job(job_id=job_info['job_id']) if job_priority: job_info['priority'] = job_priority logging.debug('the job has been finished.') plan = AfterJobPlan(job_info=job_info, processing_graph=processing_graph, failed_runs=failed_runs) statistics = plan.run() logging.debug('jobs have been created for the next steps.') return statistics def has_pending_ancestor_jobs(dataset: str, processing_step_name: str, processing_graph: ProcessingGraph=processing_graph) -> bool: processing_step = processing_graph.get_processing_step(processing_step_name) ancestors = processing_graph.get_ancestors(processing_step_name) job_types = [ancestor.job_type for ancestor in ancestors] + [processing_step.job_type] logging.debug(f'looking at ancestor jobs of {processing_step_name}: {job_types}') return Queue().has_pending_jobs(dataset=dataset, job_types=job_types) def get_revision(dataset: str) -> Optional[str]: cache_kinds = [processing_step.cache_kind for processing_step in processing_graph.get_first_processing_steps()] cache_entries = get_cache_entries_df(dataset=dataset, cache_kinds=cache_kinds).to_dict(orient='list') if cache_entries.get('dataset_git_revision') and isinstance((revision := cache_entries['dataset_git_revision'][0]), str): 
return revision job_types = [processing_step.job_type for processing_step in processing_graph.get_first_processing_steps()] pending_jobs = Queue().get_pending_jobs_df(dataset=dataset, job_types=job_types).to_dict(orient='list') if pending_jobs.get('revision') and isinstance((revision := pending_jobs['revision'][0]), str): return revision return None # File: dataset-viewer-main/libs/libcommon/src/libcommon/parquet_utils.py import asyncio import logging import os from collections.abc import Iterable from dataclasses import dataclass, field from functools import lru_cache from pathlib import Path from typing import Literal, Optional, TypedDict, Union import numpy as np import pyarrow as pa import pyarrow.compute as pc import pyarrow.parquet as pq from datasets import Features, Value from datasets.features.features import FeatureType from datasets.utils.py_utils import size_str from fsspec.implementations.http import HTTPFile, HTTPFileSystem from huggingface_hub import HfFileSystem from pyarrow.lib import ArrowInvalid from libcommon.constants import CONFIG_PARQUET_METADATA_KIND from libcommon.prometheus import StepProfiler from libcommon.simple_cache import get_previous_step_or_raise from libcommon.storage import StrPath from libcommon.viewer_utils.features import get_supported_unsupported_columns PARTIAL_PREFIX = 'partial-' PART_SUFFIX = '-part{}' class EmptyParquetMetadataError(Exception): pass class ParquetResponseFormatError(Exception): pass class FileSystemError(Exception): pass class TooBigRows(Exception): pass class SchemaMismatchError(Exception): pass class ParquetFileMetadataItem(TypedDict): dataset: str config: str split: str url: str filename: str size: int num_rows: int parquet_metadata_subpath: str def parquet_export_is_partial(parquet_file_url: str) -> bool: split_directory_name_for_parquet_export = extract_split_directory_from_parquet_url(parquet_file_url) return split_directory_name_for_parquet_export.startswith(PARTIAL_PREFIX) def extract_split_directory_from_parquet_url(parquet_url: str) -> str: split_name = parquet_url.rsplit('/', 2)[1] return split_name def get_num_parquet_files_to_process(parquet_files: list[ParquetFileMetadataItem], parquet_metadata_directory: StrPath, max_size_bytes: int) -> tuple[int, int, int]: (num_parquet_files_to_process, num_bytes, num_rows) = (0, 0, 0) for (parquet_file_id, parquet_file) in enumerate(parquet_files): parquet_metadata_path = os.path.join(parquet_metadata_directory, parquet_file['parquet_metadata_subpath']) parquet_metadata = pq.read_metadata(parquet_metadata_path) num_parquet_files_to_process += 1 num_rows += parquet_metadata.num_rows for row_group_id in range(parquet_metadata.num_row_groups): num_bytes += parquet_metadata.row_group(row_group_id).total_byte_size if num_bytes > max_size_bytes: break return (num_parquet_files_to_process, num_bytes, num_rows) def is_list_pa_type(parquet_file_path: Path, feature_name: str) -> bool: feature_arrow_type = pq.read_schema(parquet_file_path).field(feature_name).type is_list: bool = pa.types.is_list(feature_arrow_type) or pa.types.is_large_list(feature_arrow_type) return is_list @dataclass class RowGroupReader: parquet_file: pq.ParquetFile group_id: int features: Features def read(self, columns: list[str]) -> pa.Table: return self.parquet_file.read_row_group(i=self.group_id, columns=columns) def read_truncated_binary(self, columns: list[str], max_binary_length: int) -> tuple[pa.Table, list[str]]: pa_table = self.parquet_file.read_row_group(i=self.group_id, columns=columns) truncated_columns: 
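# --- Example (illustrative sketch, not part of the repository) ---------------
# parquet_export_is_partial above infers from a parquet file URL whether the
# export only covers part of the split: the split directory is the second-to-last
# path segment, and partial exports are prefixed with "partial-". A tiny
# self-contained check (the URL below is a made-up example):
PARTIAL_PREFIX = "partial-"

def split_directory(parquet_url: str) -> str:
    # ".../<config>/<split dir>/<file>.parquet" -> "<split dir>"
    return parquet_url.rsplit("/", 2)[1]

def is_partial_export(parquet_url: str) -> bool:
    return split_directory(parquet_url).startswith(PARTIAL_PREFIX)

url = "https://huggingface.co/datasets/user/ds/resolve/refs%2Fconvert%2Fparquet/default/partial-train/0000.parquet"
assert split_directory(url) == "partial-train" and is_partial_export(url)
# -----------------------------------------------------------------------------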
list[str] = [] if max_binary_length: for (field_idx, field) in enumerate(pa_table.schema): if self.features[field.name] == Value('binary') and pa_table[field_idx].nbytes > max_binary_length: truncated_array = pc.binary_slice(pa_table[field_idx], 0, max_binary_length // len(pa_table)) pa_table = pa_table.set_column(field_idx, field, truncated_array) truncated_columns.append(field.name) return (pa_table, truncated_columns) def read_size(self, columns: Optional[Iterable[str]]=None) -> int: if columns is None: return self.parquet_file.metadata.row_group(self.group_id).total_byte_size else: columns = set(columns) columns_metadata = self.parquet_file.metadata.row_group(self.group_id).to_dict()['columns'] return sum((column_metadata['total_uncompressed_size'] for column_metadata in columns_metadata if column_metadata['path_in_schema'] in columns)) @dataclass class ParquetIndexWithMetadata: features: Features supported_columns: list[str] unsupported_columns: list[str] parquet_files_urls: list[str] metadata_paths: list[str] num_bytes: list[int] num_rows: list[int] httpfs: HTTPFileSystem hf_token: Optional[str] max_arrow_data_in_memory: int partial: bool num_rows_total: int = field(init=False) def __post_init__(self) -> None: if self.httpfs._session is None: self.httpfs_session = asyncio.run(self.httpfs.set_session()) else: self.httpfs_session = self.httpfs._session self.num_rows_total = sum(self.num_rows) def query_truncated_binary(self, offset: int, length: int) -> tuple[pa.Table, list[str]]: all_columns = set(self.features) binary_columns = set((column for (column, feature) in self.features.items() if feature == Value('binary'))) if not binary_columns: return (self.query(offset=offset, length=length), []) with StepProfiler(method='parquet_index_with_metadata.query', step='get the parquet files than contain the requested rows'): parquet_file_offsets = np.cumsum(self.num_rows) last_row_in_parquet = parquet_file_offsets[-1] - 1 first_row = min(offset, last_row_in_parquet) last_row = min(offset + length - 1, last_row_in_parquet) (first_parquet_file_id, last_parquet_file_id) = np.searchsorted(parquet_file_offsets, [first_row, last_row], side='right') parquet_offset = offset - parquet_file_offsets[first_parquet_file_id - 1] if first_parquet_file_id > 0 else offset urls = self.parquet_files_urls[first_parquet_file_id:last_parquet_file_id + 1] metadata_paths = self.metadata_paths[first_parquet_file_id:last_parquet_file_id + 1] num_bytes = self.num_bytes[first_parquet_file_id:last_parquet_file_id + 1] with StepProfiler(method='parquet_index_with_metadata.query', step='load the remote parquet files using metadata from disk'): parquet_files = [pq.ParquetFile(HTTPFile(self.httpfs, url, session=self.httpfs_session, size=size, loop=self.httpfs.loop, cache_type=None, **self.httpfs.kwargs), metadata=pq.read_metadata(metadata_path), pre_buffer=True) for (url, metadata_path, size) in zip(urls, metadata_paths, num_bytes)] with StepProfiler(method='parquet_index_with_metadata.query', step='get the row groups than contain the requested rows'): row_group_offsets = np.cumsum([parquet_file.metadata.row_group(group_id).num_rows for parquet_file in parquet_files for group_id in range(parquet_file.metadata.num_row_groups)]) row_group_readers = [RowGroupReader(parquet_file=parquet_file, group_id=group_id, features=self.features) for parquet_file in parquet_files for group_id in range(parquet_file.metadata.num_row_groups)] if len(row_group_offsets) == 0 or row_group_offsets[-1] == 0: if offset < 0: raise IndexError('Offset 
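# --- Example (illustrative sketch, not part of the repository) ---------------
# RowGroupReader.read_truncated_binary above keeps oversized binary cells from
# blowing up memory by slicing binary columns down to a byte budget with
# pyarrow.compute.binary_slice. A self-contained illustration on an in-memory
# table (the 100-byte budget and column names are arbitrary):
import pyarrow as pa
import pyarrow.compute as pc

table = pa.table({"image": pa.array([b"x" * 1000, b"y" * 10], type=pa.binary()),
                  "label": [0, 1]})
max_binary_length = 100

truncated_columns = []
for idx, field_ in enumerate(table.schema):
    if pa.types.is_binary(field_.type) and table.column(idx).nbytes > max_binary_length:
        # replace the column with a truncated copy and remember that we did so
        table = table.set_column(idx, field_, pc.binary_slice(table.column(idx), 0, max_binary_length))
        truncated_columns.append(field_.name)

assert truncated_columns == ["image"] and len(table.column("image")[0].as_py()) == max_binary_length
# -----------------------------------------------------------------------------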
must be non-negative') return (parquet_files[0].read(), []) last_row_in_parquet = row_group_offsets[-1] - 1 first_row = min(parquet_offset, last_row_in_parquet) last_row = min(parquet_offset + length - 1, last_row_in_parquet) (first_row_group_id, last_row_group_id) = np.searchsorted(row_group_offsets, [first_row, last_row], side='right') with StepProfiler(method='parquet_index_with_metadata.row_groups_size_check_truncated_binary', step='check if the rows can fit in memory'): in_memory_max_non_binary_size = sum([row_group_readers[i].read_size(columns=all_columns - binary_columns) for i in range(first_row_group_id, last_row_group_id + 1)]) in_memory_max_binary_size = max([row_group_readers[i].read_size(columns=binary_columns) for i in range(first_row_group_id, last_row_group_id + 1)]) in_memory_max_size = in_memory_max_non_binary_size + in_memory_max_binary_size if in_memory_max_size > self.max_arrow_data_in_memory: raise TooBigRows(f'Rows from parquet row groups are too big to be read: {size_str(in_memory_max_size)} (max={size_str(self.max_arrow_data_in_memory)})') with StepProfiler(method='parquet_index_with_metadata.query_truncated_binary', step='read the row groups'): max_binary_length = max(int((self.max_arrow_data_in_memory - in_memory_max_non_binary_size) / (last_row_group_id + 1 - first_row_group_id) / len(binary_columns) / 2), 20) try: pa_tables: list[pa.Table] = [] truncated_columns: set[str] = set() for i in range(first_row_group_id, last_row_group_id + 1): (rg_pa_table, rg_truncated_columns) = row_group_readers[i].read_truncated_binary(self.supported_columns, max_binary_length=max_binary_length) pa_tables.append(rg_pa_table) truncated_columns |= set(rg_truncated_columns) pa_table = pa.concat_tables(pa_tables) except ArrowInvalid as err: raise SchemaMismatchError('Parquet files have different schema.', err) first_row_in_pa_table = row_group_offsets[first_row_group_id - 1] if first_row_group_id > 0 else 0 return (pa_table.slice(parquet_offset - first_row_in_pa_table, length), list(truncated_columns)) def query(self, offset: int, length: int) -> pa.Table: with StepProfiler(method='parquet_index_with_metadata.query', step='get the parquet files than contain the requested rows'): parquet_file_offsets = np.cumsum(self.num_rows) last_row_in_parquet = parquet_file_offsets[-1] - 1 first_row = min(offset, last_row_in_parquet) last_row = min(offset + length - 1, last_row_in_parquet) (first_parquet_file_id, last_parquet_file_id) = np.searchsorted(parquet_file_offsets, [first_row, last_row], side='right') parquet_offset = offset - parquet_file_offsets[first_parquet_file_id - 1] if first_parquet_file_id > 0 else offset urls = self.parquet_files_urls[first_parquet_file_id:last_parquet_file_id + 1] metadata_paths = self.metadata_paths[first_parquet_file_id:last_parquet_file_id + 1] num_bytes = self.num_bytes[first_parquet_file_id:last_parquet_file_id + 1] with StepProfiler(method='parquet_index_with_metadata.query', step='load the remote parquet files using metadata from disk'): parquet_files = [pq.ParquetFile(HTTPFile(self.httpfs, url, session=self.httpfs_session, size=size, loop=self.httpfs.loop, cache_type=None, **self.httpfs.kwargs), metadata=pq.read_metadata(metadata_path), pre_buffer=True) for (url, metadata_path, size) in zip(urls, metadata_paths, num_bytes)] with StepProfiler(method='parquet_index_with_metadata.query', step='get the row groups than contain the requested rows'): row_group_offsets = np.cumsum([parquet_file.metadata.row_group(group_id).num_rows for parquet_file in 
parquet_files for group_id in range(parquet_file.metadata.num_row_groups)]) row_group_readers = [RowGroupReader(parquet_file=parquet_file, group_id=group_id, features=self.features) for parquet_file in parquet_files for group_id in range(parquet_file.metadata.num_row_groups)] if len(row_group_offsets) == 0 or row_group_offsets[-1] == 0: if offset < 0: raise IndexError('Offset must be non-negative') return parquet_files[0].read() last_row_in_parquet = row_group_offsets[-1] - 1 first_row = min(parquet_offset, last_row_in_parquet) last_row = min(parquet_offset + length - 1, last_row_in_parquet) (first_row_group_id, last_row_group_id) = np.searchsorted(row_group_offsets, [first_row, last_row], side='right') with StepProfiler(method='parquet_index_with_metadata.row_groups_size_check', step='check if the rows can fit in memory'): row_groups_size = sum([row_group_readers[i].read_size() for i in range(first_row_group_id, last_row_group_id + 1)]) if row_groups_size > self.max_arrow_data_in_memory: raise TooBigRows(f'Rows from parquet row groups are too big to be read: {size_str(row_groups_size)} (max={size_str(self.max_arrow_data_in_memory)})') with StepProfiler(method='parquet_index_with_metadata.query', step='read the row groups'): try: pa_table = pa.concat_tables([row_group_readers[i].read(self.supported_columns) for i in range(first_row_group_id, last_row_group_id + 1)]) except ArrowInvalid as err: raise SchemaMismatchError('Parquet files have different schema.', err) first_row_in_pa_table = row_group_offsets[first_row_group_id - 1] if first_row_group_id > 0 else 0 return pa_table.slice(parquet_offset - first_row_in_pa_table, length) @staticmethod def from_parquet_metadata_items(parquet_file_metadata_items: list[ParquetFileMetadataItem], features: Optional[Features], parquet_metadata_directory: StrPath, httpfs: HTTPFileSystem, hf_token: Optional[str], max_arrow_data_in_memory: int, unsupported_features: list[FeatureType]=[]) -> 'ParquetIndexWithMetadata': if not parquet_file_metadata_items: raise EmptyParquetMetadataError('No parquet files found.') partial = parquet_export_is_partial(parquet_file_metadata_items[0]['url']) with StepProfiler(method='parquet_index_with_metadata.from_parquet_metadata_items', step='get the index from parquet metadata'): try: parquet_files_metadata = sorted(parquet_file_metadata_items, key=lambda parquet_file_metadata: parquet_file_metadata['filename']) parquet_files_urls = [parquet_file_metadata['url'] for parquet_file_metadata in parquet_files_metadata] metadata_paths = [os.path.join(parquet_metadata_directory, parquet_file_metadata['parquet_metadata_subpath']) for parquet_file_metadata in parquet_files_metadata] num_bytes = [parquet_file_metadata['size'] for parquet_file_metadata in parquet_files_metadata] num_rows = [parquet_file_metadata['num_rows'] for parquet_file_metadata in parquet_files_metadata] except Exception as e: raise ParquetResponseFormatError(f'Could not parse the list of parquet files: {e}') from e with StepProfiler(method='parquet_index_with_metadata.from_parquet_metadata_items', step="get the dataset's features"): if features is None: features = Features.from_arrow_schema(pq.read_schema(metadata_paths[0])) (supported_columns, unsupported_columns) = get_supported_unsupported_columns(features, unsupported_features=unsupported_features) return ParquetIndexWithMetadata(features=features, supported_columns=supported_columns, unsupported_columns=unsupported_columns, parquet_files_urls=parquet_files_urls, metadata_paths=metadata_paths, 
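# --- Example (illustrative sketch, not part of the repository) ---------------
# ParquetIndexWithMetadata.query above avoids reading whole files: cumulative
# row counts plus numpy.searchsorted locate the row groups that contain the
# requested [offset, offset + length) window, and the concatenated table is then
# sliced to the exact rows. The arithmetic in isolation (row counts are made up):
import numpy as np

num_rows_per_row_group = [100, 100, 50]          # e.g. from parquet metadata
offset, length = 120, 60                         # rows 120..179 are requested

row_group_offsets = np.cumsum(num_rows_per_row_group)        # [100, 200, 250]
last_row = min(offset + length - 1, row_group_offsets[-1] - 1)
first_group, last_group = np.searchsorted(row_group_offsets, [offset, last_row], side="right")
assert (first_group, last_group) == (1, 1)       # everything lives in the second row group

# rows before the first selected row group, used to slice the concatenated table
first_row_in_selection = row_group_offsets[first_group - 1] if first_group > 0 else 0
slice_start = offset - first_row_in_selection
assert slice_start == 20                         # skip 20 rows inside that row group
# -----------------------------------------------------------------------------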
num_bytes=num_bytes, num_rows=num_rows, httpfs=httpfs, hf_token=hf_token, max_arrow_data_in_memory=max_arrow_data_in_memory, partial=partial) class RowsIndex: def __init__(self, dataset: str, config: str, split: str, httpfs: HfFileSystem, hf_token: Optional[str], parquet_metadata_directory: StrPath, max_arrow_data_in_memory: int, unsupported_features: list[FeatureType]=[]): self.dataset = dataset self.config = config self.split = split self.httpfs = httpfs self.parquet_index = self._init_parquet_index(hf_token=hf_token, parquet_metadata_directory=parquet_metadata_directory, max_arrow_data_in_memory=max_arrow_data_in_memory, unsupported_features=unsupported_features) def _init_parquet_index(self, hf_token: Optional[str], parquet_metadata_directory: StrPath, max_arrow_data_in_memory: int, unsupported_features: list[FeatureType]=[]) -> ParquetIndexWithMetadata: with StepProfiler(method='rows_index._init_parquet_index', step='all'): with StepProfiler(method='rows_index._init_parquet_index', step='get list of parquet files for split'): response = get_previous_step_or_raise(kind=CONFIG_PARQUET_METADATA_KIND, dataset=self.dataset, config=self.config, split=None) self.revision = response['dataset_git_revision'] content = response['content'] if content.get('features'): features = Features.from_dict(content['features']) else: features = None logging.info(f'Create ParquetIndexWithMetadata for dataset={self.dataset}, config={self.config}, split={self.split}') return ParquetIndexWithMetadata.from_parquet_metadata_items([parquet_item for parquet_item in content['parquet_files_metadata'] if parquet_item['split'] == self.split and parquet_item['config'] == self.config], features=features, parquet_metadata_directory=parquet_metadata_directory, httpfs=self.httpfs, hf_token=hf_token, max_arrow_data_in_memory=max_arrow_data_in_memory, unsupported_features=unsupported_features) @lru_cache(maxsize=1) def query(self, offset: int, length: int) -> pa.Table: logging.info(f'Query {type(self.parquet_index).__name__} for dataset={self.dataset}, config={self.config}, split={self.split}, offset={offset}, length={length}') return self.parquet_index.query(offset=offset, length=length) @lru_cache(maxsize=1) def query_truncated_binary(self, offset: int, length: int) -> tuple[pa.Table, list[str]]: logging.info(f'Query {type(self.parquet_index).__name__} for dataset={self.dataset}, config={self.config}, split={self.split}, offset={offset}, length={length}, with truncated binary') return self.parquet_index.query_truncated_binary(offset=offset, length=length) class Indexer: def __init__(self, parquet_metadata_directory: StrPath, httpfs: HTTPFileSystem, max_arrow_data_in_memory: int, unsupported_features: list[FeatureType]=[], all_columns_supported_datasets_allow_list: Union[Literal['all'], list[str]]='all', hf_token: Optional[str]=None): self.parquet_metadata_directory = parquet_metadata_directory self.httpfs = httpfs self.hf_token = hf_token self.max_arrow_data_in_memory = max_arrow_data_in_memory self.unsupported_features = unsupported_features self.all_columns_supported_datasets_allow_list = all_columns_supported_datasets_allow_list @lru_cache(maxsize=1) def get_rows_index(self, dataset: str, config: str, split: str) -> RowsIndex: filter_features = self.all_columns_supported_datasets_allow_list != 'all' and dataset not in self.all_columns_supported_datasets_allow_list unsupported_features = self.unsupported_features if filter_features else [] return RowsIndex(dataset=dataset, config=config, split=split, httpfs=self.httpfs, 
hf_token=self.hf_token, parquet_metadata_directory=self.parquet_metadata_directory, max_arrow_data_in_memory=self.max_arrow_data_in_memory, unsupported_features=unsupported_features) # File: dataset-viewer-main/libs/libcommon/src/libcommon/processing_graph.py from __future__ import annotations from collections.abc import Mapping from dataclasses import dataclass, field from typing import Any, Literal, Optional, TypedDict, Union, get_args import networkx as nx from libcommon.constants import DEFAULT_DIFFICULTY, DEFAULT_INPUT_TYPE, DEFAULT_JOB_RUNNER_VERSION, MIN_BYTES_FOR_BONUS_DIFFICULTY from libcommon.utils import inputs_to_string InputType = Literal['dataset', 'config', 'split'] def guard_input_type(x: Any) -> InputType: if x == 'dataset': return 'dataset' elif x == 'config': return 'config' elif x == 'split': return 'split' if x in get_args(InputType): raise RuntimeError(f'Value {x} should be included in the literal values') raise ValueError(f'Invalid input type: {x}') def guard_int(x: Any) -> int: if isinstance(x, int): return x raise ValueError(f'Invalid int: {x}') def check_one_of_parents_is_same_or_higher_level(processing_graph: ProcessingGraph) -> None: first_steps = processing_graph.get_first_processing_steps() for step in processing_graph.get_topologically_ordered_processing_steps(): parents = processing_graph.get_parents(step.name) if parents: parent_input_types = [p.input_type for p in parents] if step.input_type == 'dataset': if 'dataset' not in parent_input_types: raise ValueError(f'Step {step.name} is a dataset-level step but none of its parents are dataset-level.') elif step.input_type == 'config': if 'dataset' not in parent_input_types and 'config' not in parent_input_types: raise ValueError(f'Step {step.name} is a config-level step but none of its parents are config-level or dataset-level.') else: if step not in first_steps: raise ValueError(f'Step {step.name} has not parents, but is not a root step.') if step.input_type != 'dataset': raise ValueError(f'Step {step.name} is a root step but is not a dataset-level step.') class ProcessingStepSpecification(TypedDict, total=False): input_type: InputType triggered_by: Union[list[str], str, None] job_runner_version: int difficulty: int bonus_difficulty_if_dataset_is_big: int ProcessingGraphSpecification = Mapping[str, ProcessingStepSpecification] class ProcessingStepDoesNotExist(Exception): pass @dataclass class ProcessingStep: name: str input_type: InputType job_runner_version: int difficulty: int bonus_difficulty_if_dataset_is_big: int cache_kind: str = field(init=False) job_type: str = field(init=False) def __post_init__(self) -> None: self.cache_kind = self.name self.job_type = self.name def copy(self) -> ProcessingStep: return ProcessingStep(name=self.name, input_type=self.input_type, job_runner_version=self.job_runner_version, difficulty=self.difficulty, bonus_difficulty_if_dataset_is_big=self.bonus_difficulty_if_dataset_is_big) def get_triggered_by_as_list(triggered_by: Union[list[str], str, None]) -> list[str]: if triggered_by is None: return [] return [triggered_by] if isinstance(triggered_by, str) else triggered_by def copy_processing_steps_list(processing_steps: list[ProcessingStep]) -> list[ProcessingStep]: return [processing_step.copy() for processing_step in processing_steps] @dataclass class ProcessingGraph: specification: ProcessingGraphSpecification min_bytes_for_bonus_difficulty: int = MIN_BYTES_FOR_BONUS_DIFFICULTY check_one_of_parents_is_same_or_higher_level: bool = True _nx_graph: nx.DiGraph = 
field(init=False) _processing_steps: Mapping[str, ProcessingStep] = field(init=False) _processing_step_names_by_input_type: Mapping[InputType, list[str]] = field(init=False) _first_processing_steps: list[ProcessingStep] = field(init=False) _topologically_ordered_processing_steps: list[ProcessingStep] = field(init=False) _alphabetically_ordered_processing_steps: list[ProcessingStep] = field(init=False) def __post_init__(self) -> None: _nx_graph = nx.DiGraph() _processing_steps: dict[str, ProcessingStep] = {} _processing_step_names_by_input_type: dict[InputType, list[str]] = {'dataset': [], 'config': [], 'split': []} for (name, specification) in self.specification.items(): input_type = guard_input_type(specification.get('input_type', DEFAULT_INPUT_TYPE)) if _nx_graph.has_node(name) or name in _processing_steps or name in _processing_step_names_by_input_type[input_type]: raise ValueError(f'Processing step {name} is defined twice.') _nx_graph.add_node(name) _processing_steps[name] = ProcessingStep(name=name, input_type=input_type, job_runner_version=specification.get('job_runner_version', DEFAULT_JOB_RUNNER_VERSION), difficulty=specification.get('difficulty', DEFAULT_DIFFICULTY), bonus_difficulty_if_dataset_is_big=specification.get('bonus_difficulty_if_dataset_is_big', 0)) if _processing_steps[name].bonus_difficulty_if_dataset_is_big and input_type == 'dataset': raise ValueError(f'Processing step {name} has bonus_difficulty_if_dataset_is_big but this field is not supported for dataset-level steps.') _processing_step_names_by_input_type[input_type].append(name) for (name, specification) in self.specification.items(): triggered_by = get_triggered_by_as_list(specification.get('triggered_by')) for processing_step_name in triggered_by: if not _nx_graph.has_node(processing_step_name): raise ValueError(f'Processing step {name} is triggered by {processing_step_name} but {processing_step_name} is not defined.') _nx_graph.add_edge(processing_step_name, name) if not nx.is_directed_acyclic_graph(_nx_graph): raise ValueError('The graph is not a directed acyclic graph.') self._nx_graph = _nx_graph self._processing_steps = _processing_steps self._processing_step_names_by_input_type = _processing_step_names_by_input_type self._first_processing_steps = [self._processing_steps[processing_step_name] for (processing_step_name, degree) in _nx_graph.in_degree() if degree == 0] if any((processing_step.input_type != 'dataset' for processing_step in self._first_processing_steps)): raise ValueError('The first processing steps must be dataset-level. 
The graph state is incoherent.') self._topologically_ordered_processing_steps = [self.get_processing_step(processing_step_name) for processing_step_name in nx.topological_sort(_nx_graph)] self._alphabetically_ordered_processing_steps = [self.get_processing_step(processing_step_name) for processing_step_name in sorted(_nx_graph.nodes())] if self.check_one_of_parents_is_same_or_higher_level: check_one_of_parents_is_same_or_higher_level(self) def get_processing_step(self, processing_step_name: str) -> ProcessingStep: try: return self._processing_steps[processing_step_name].copy() except nx.NetworkXError as e: raise ProcessingStepDoesNotExist(f'Unknown job type: {processing_step_name}') from e def get_processing_step_by_job_type(self, job_type: str) -> ProcessingStep: return self.get_processing_step(job_type) def get_children(self, processing_step_name: str) -> list[ProcessingStep]: try: return [self.get_processing_step(successor) for successor in self._nx_graph.successors(processing_step_name)] except nx.NetworkXError as e: raise ProcessingStepDoesNotExist(f'Unknown processing step: {processing_step_name}') from e def get_parents(self, processing_step_name: str) -> list[ProcessingStep]: try: return [self.get_processing_step(predecessor) for predecessor in self._nx_graph.predecessors(processing_step_name)] except nx.NetworkXError as e: raise ProcessingStepDoesNotExist(f'Unknown processing step: {processing_step_name}') from e def get_ancestors(self, processing_step_name: str) -> list[ProcessingStep]: try: return [self.get_processing_step(ancestor) for ancestor in nx.ancestors(self._nx_graph, processing_step_name)] except nx.NetworkXError as e: raise ProcessingStepDoesNotExist(f'Unknown processing step: {processing_step_name}') from e def get_first_processing_steps(self) -> list[ProcessingStep]: return copy_processing_steps_list(self._first_processing_steps) def get_topologically_ordered_processing_steps(self) -> list[ProcessingStep]: return copy_processing_steps_list(self._topologically_ordered_processing_steps) def get_alphabetically_ordered_processing_steps(self) -> list[ProcessingStep]: return copy_processing_steps_list(self._alphabetically_ordered_processing_steps) def get_processing_steps(self, order: Optional[Literal['alphabetical', 'topological']]=None) -> list[ProcessingStep]: if order == 'topological': return self.get_topologically_ordered_processing_steps() return self.get_alphabetically_ordered_processing_steps() def get_input_type_processing_steps(self, input_type: InputType='dataset') -> list[ProcessingStep]: return [self.get_processing_step(processing_step_name) for processing_step_name in self._processing_step_names_by_input_type[input_type]] @dataclass class Artifact: processing_step: ProcessingStep dataset: str revision: str config: Optional[str] split: Optional[str] id: str = field(init=False) def __post_init__(self) -> None: if self.processing_step.input_type == 'dataset': if self.config is not None or self.split is not None: raise ValueError('Step input type is dataset, but config or split is not None') elif self.processing_step.input_type == 'config': if self.config is None or self.split is not None: raise ValueError('Step input type is config, but config is None or split is not None') elif self.processing_step.input_type == 'split': if self.config is None or self.split is None: raise ValueError('Step input type is split, but config or split is None') else: raise ValueError(f'Invalid step input type: {self.processing_step.input_type}') self.id = 
Artifact.get_id(dataset=self.dataset, revision=self.revision, config=self.config, split=self.split, processing_step_name=self.processing_step.name) @staticmethod def get_id(dataset: str, revision: str, config: Optional[str], split: Optional[str], processing_step_name: str) -> str: return inputs_to_string(dataset=dataset, revision=revision, config=config, split=split, prefix=processing_step_name) @staticmethod def parse_id(id: str) -> tuple[str, str, Optional[str], Optional[str], str]: parts = id.split(',') prefix = parts[0] parts = parts[1:] dataset = parts[0] revision = parts[1] parts = parts[2:] config = None split = None if len(parts) > 0: config = parts[0] if len(parts) > 1: split = parts[1] return (dataset, revision, config, split, prefix) specification: ProcessingGraphSpecification = {'dataset-config-names': {'input_type': 'dataset', 'job_runner_version': 1, 'difficulty': 50}, 'split-first-rows': {'input_type': 'split', 'triggered_by': ['config-split-names', 'config-parquet-metadata'], 'job_runner_version': 4, 'difficulty': 70}, 'config-parquet-and-info': {'input_type': 'config', 'triggered_by': 'dataset-config-names', 'job_runner_version': 4, 'difficulty': 70}, 'config-parquet': {'input_type': 'config', 'triggered_by': 'config-parquet-and-info', 'job_runner_version': 6, 'difficulty': 20}, 'config-parquet-metadata': {'input_type': 'config', 'triggered_by': 'config-parquet', 'job_runner_version': 2, 'difficulty': 50}, 'dataset-parquet': {'input_type': 'dataset', 'triggered_by': ['dataset-config-names', 'config-parquet'], 'job_runner_version': 2, 'difficulty': 20}, 'config-info': {'input_type': 'config', 'triggered_by': 'config-parquet-and-info', 'job_runner_version': 2, 'difficulty': 20}, 'dataset-info': {'input_type': 'dataset', 'triggered_by': ['dataset-config-names', 'config-info'], 'job_runner_version': 2, 'difficulty': 20}, 'config-split-names': {'input_type': 'config', 'triggered_by': ['dataset-config-names', 'config-info'], 'job_runner_version': 3, 'difficulty': 60}, 'config-size': {'input_type': 'config', 'triggered_by': 'config-parquet-and-info', 'job_runner_version': 2, 'difficulty': 20}, 'dataset-size': {'input_type': 'dataset', 'triggered_by': ['dataset-config-names', 'config-size'], 'job_runner_version': 2, 'difficulty': 20}, 'dataset-split-names': {'input_type': 'dataset', 'triggered_by': ['dataset-config-names', 'config-split-names'], 'job_runner_version': 3, 'difficulty': 20}, 'split-descriptive-statistics': {'input_type': 'split', 'triggered_by': 'config-parquet-metadata', 'job_runner_version': 3, 'difficulty': 70, 'bonus_difficulty_if_dataset_is_big': 20}, 'split-is-valid': {'input_type': 'split', 'triggered_by': ['config-size', 'split-first-rows', 'split-duckdb-index', 'split-descriptive-statistics'], 'job_runner_version': 4, 'difficulty': 20}, 'config-is-valid': {'input_type': 'config', 'triggered_by': ['config-split-names', 'split-is-valid'], 'job_runner_version': 4, 'difficulty': 20}, 'dataset-is-valid': {'input_type': 'dataset', 'triggered_by': ['dataset-config-names', 'config-is-valid'], 'job_runner_version': 8, 'difficulty': 20}, 'split-image-url-columns': {'input_type': 'split', 'triggered_by': 'split-first-rows', 'job_runner_version': 1, 'difficulty': 40}, 'split-opt-in-out-urls-scan': {'input_type': 'split', 'triggered_by': 'split-image-url-columns', 'job_runner_version': 4, 'difficulty': 70}, 'split-opt-in-out-urls-count': {'input_type': 'split', 'triggered_by': 'split-opt-in-out-urls-scan', 'job_runner_version': 2, 'difficulty': 20}, 
'config-opt-in-out-urls-count': {'input_type': 'config', 'triggered_by': ['config-split-names', 'split-opt-in-out-urls-count'], 'job_runner_version': 3, 'difficulty': 20}, 'dataset-opt-in-out-urls-count': {'input_type': 'dataset', 'triggered_by': ['dataset-config-names', 'config-opt-in-out-urls-count'], 'job_runner_version': 2, 'difficulty': 20}, 'split-presidio-scan': {'input_type': 'split', 'triggered_by': 'config-parquet-metadata', 'job_runner_version': 1, 'difficulty': 70}, 'dataset-presidio-entities-count': {'input_type': 'dataset', 'triggered_by': ['dataset-split-names', 'split-presidio-scan'], 'job_runner_version': 1, 'difficulty': 20}, 'split-duckdb-index': {'input_type': 'split', 'triggered_by': 'config-parquet-metadata', 'job_runner_version': 3, 'difficulty': 70, 'bonus_difficulty_if_dataset_is_big': 20}, 'config-duckdb-index-size': {'input_type': 'config', 'triggered_by': ['config-split-names', 'split-duckdb-index'], 'job_runner_version': 2, 'difficulty': 20}, 'dataset-duckdb-index-size': {'input_type': 'dataset', 'triggered_by': ['dataset-config-names', 'config-duckdb-index-size'], 'job_runner_version': 1, 'difficulty': 20}, 'dataset-hub-cache': {'input_type': 'dataset', 'triggered_by': ['dataset-is-valid', 'dataset-size', 'dataset-compatible-libraries', 'dataset-modalities'], 'job_runner_version': 3, 'difficulty': 20}, 'dataset-compatible-libraries': {'input_type': 'dataset', 'triggered_by': 'dataset-info', 'job_runner_version': 6, 'difficulty': 20}, 'dataset-modalities': {'input_type': 'dataset', 'triggered_by': ['dataset-info', 'dataset-filetypes', 'split-image-url-columns'], 'job_runner_version': 2, 'difficulty': 20}, 'dataset-croissant-crumbs': {'input_type': 'dataset', 'triggered_by': 'dataset-info', 'job_runner_version': 2, 'difficulty': 20}, 'dataset-filetypes': {'input_type': 'dataset', 'job_runner_version': 1, 'difficulty': 50}} processing_graph = ProcessingGraph(specification=specification) # File: dataset-viewer-main/libs/libcommon/src/libcommon/prometheus.py import os import time from types import TracebackType from typing import Any, Optional, TypeVar from prometheus_client import REGISTRY, CollectorRegistry, Gauge, Histogram, generate_latest from prometheus_client.multiprocess import MultiProcessCollector from psutil import disk_usage from libcommon.constants import LONG_DURATION_PROMETHEUS_HISTOGRAM_BUCKETS from libcommon.queue.metrics import JobTotalMetricDocument, WorkerSizeJobsCountDocument from libcommon.simple_cache import CacheTotalMetricDocument from libcommon.storage import StrPath class Prometheus: def getRegistry(self) -> CollectorRegistry: if 'PROMETHEUS_MULTIPROC_DIR' in os.environ: registry = CollectorRegistry() MultiProcessCollector(registry=registry) else: registry = REGISTRY return registry def getLatestContent(self) -> Any: latest = generate_latest(self.getRegistry()) return latest.decode('utf-8') QUEUE_JOBS_TOTAL = Gauge(name='queue_jobs_total', documentation='Number of jobs in the queue', labelnames=['queue', 'status', 'dataset_status'], multiprocess_mode='liveall') WORKER_SIZE_JOBS_COUNT = Gauge(name='worker_size_jobs_count', documentation='Number of jobs per worker size', labelnames=['worker_size'], multiprocess_mode='liveall') RESPONSES_IN_CACHE_TOTAL = Gauge(name='responses_in_cache_total', documentation='Number of cached responses in the cache', labelnames=['kind', 'http_status', 'error_code'], multiprocess_mode='liveall') PARQUET_METADATA_DISK_USAGE = Gauge(name='parquet_metadata_disk_usage', documentation='Usage of the disk where the 
parquet metadata are stored (workers, used by /rows)', labelnames=['type'], multiprocess_mode='liveall') METHOD_STEPS_PROCESSING_TIME = Histogram('method_steps_processing_time_seconds', 'Histogram of the processing time of specific steps in methods for a given context (in seconds)', ['method', 'step']) METHOD_LONG_STEPS_PROCESSING_TIME = Histogram('method_long_steps_processing_time_seconds', 'Histogram of the processing time of specific long steps in methods for a given context (in seconds)', ['method', 'step'], buckets=LONG_DURATION_PROMETHEUS_HISTOGRAM_BUCKETS) def update_queue_jobs_total() -> None: for job_metric in JobTotalMetricDocument.objects(): QUEUE_JOBS_TOTAL.labels(queue=job_metric.job_type, status=job_metric.status, dataset_status=job_metric.dataset_status).set(job_metric.total) def update_worker_size_jobs_count() -> None: for jobs_count in WorkerSizeJobsCountDocument.objects(): WORKER_SIZE_JOBS_COUNT.labels(worker_size=jobs_count.worker_size.value).set(jobs_count.jobs_count) def update_responses_in_cache_total() -> None: for cache_metric in CacheTotalMetricDocument.objects(): RESPONSES_IN_CACHE_TOTAL.labels(kind=cache_metric.kind, http_status=cache_metric.http_status, error_code=cache_metric.error_code).set(cache_metric.total) def update_disk_gauge(gauge: Gauge, directory: StrPath) -> None: (total, used, free, percent) = disk_usage(str(directory)) gauge.labels(type='total').set(total) gauge.labels(type='used').set(used) gauge.labels(type='free').set(free) gauge.labels(type='percent').set(percent) def update_parquet_metadata_disk_usage(directory: StrPath) -> None: update_disk_gauge(PARQUET_METADATA_DISK_USAGE, directory) T = TypeVar('T', bound='StepProfiler') class StepProfiler: def __init__(self, method: str, step: str, histogram: Optional[Histogram]=None): self.histogram = METHOD_STEPS_PROCESSING_TIME if histogram is None else histogram self.method = method self.step = step self.before_time = time.perf_counter() def __enter__(self: T) -> T: return self def __exit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType]) -> None: after_time = time.perf_counter() self.histogram.labels(method=self.method, step=self.step).observe(after_time - self.before_time) class LongStepProfiler(StepProfiler): def __init__(self, method: str, step: str): super().__init__(method, step, histogram=METHOD_LONG_STEPS_PROCESSING_TIME) # File: dataset-viewer-main/libs/libcommon/src/libcommon/queue/dataset_blockages.py import types from typing import Generic, TypeVar from mongoengine import Document from mongoengine.fields import DateTimeField, StringField from mongoengine.queryset.queryset import QuerySet from libcommon.constants import QUEUE_COLLECTION_DATASET_BLOCKAGES, QUEUE_MONGOENGINE_ALIAS from libcommon.utils import get_datetime U = TypeVar('U', bound=Document) def no_op(self, _): return self QuerySet.__class_getitem__ = types.MethodType(no_op, QuerySet) class QuerySetManager(Generic[U]): def __get__(self, instance: object, cls: type[U]) -> QuerySet[U]: return QuerySet(cls, cls._get_collection()) DATASET_BLOCKAGE_EXPIRE_AFTER_SECONDS = 6 * 60 * 60 DATASET_STATUS_NORMAL = 'normal' DATASET_STATUS_BLOCKED = 'blocked' class DatasetBlockageDocument(Document): meta = {'collection': QUEUE_COLLECTION_DATASET_BLOCKAGES, 'db_alias': QUEUE_MONGOENGINE_ALIAS, 'indexes': ['dataset', {'name': 'DATASET_BLOCKAGE_EXPIRE_AFTER_SECONDS_BLUE', 'fields': ['blocked_at'], 'expireAfterSeconds': DATASET_BLOCKAGE_EXPIRE_AFTER_SECONDS}]} dataset = 
StringField(required=True) blocked_at = DateTimeField(required=True) objects = QuerySetManager['DatasetBlockageDocument']() def block_dataset(dataset: str) -> None: DatasetBlockageDocument(dataset=dataset, blocked_at=get_datetime()).save() def get_blocked_datasets() -> list[str]: return DatasetBlockageDocument.objects().distinct('dataset') def is_blocked(dataset: str) -> bool: return DatasetBlockageDocument.objects(dataset=dataset).count() > 0 # File: dataset-viewer-main/libs/libcommon/src/libcommon/queue/jobs.py import contextlib import logging import types from collections import Counter from collections.abc import Mapping from datetime import datetime, timedelta from itertools import groupby from operator import itemgetter from typing import Any, Generic, Optional, TypedDict, TypeVar from uuid import uuid4 import bson import pandas as pd import pyarrow as pa import pytz from mongoengine import Document from mongoengine.errors import DoesNotExist from mongoengine.fields import DateTimeField, EnumField, IntField, StringField from mongoengine.queryset.queryset import QuerySet from pymongoarrow.api import Schema, find_pandas_all from libcommon.constants import DEFAULT_DIFFICULTY_MAX, DEFAULT_DIFFICULTY_MIN, QUEUE_COLLECTION_JOBS, QUEUE_MONGOENGINE_ALIAS from libcommon.dtos import FlatJobInfo, JobInfo, Priority, Status, WorkerSize from libcommon.queue.dataset_blockages import DATASET_STATUS_BLOCKED, DATASET_STATUS_NORMAL, get_blocked_datasets from libcommon.queue.lock import lock, release_lock from libcommon.queue.metrics import decrease_metric, decrease_worker_size_metrics, increase_metric, update_metrics_for_type from libcommon.queue.past_jobs import create_past_job from libcommon.utils import get_datetime, inputs_to_string U = TypeVar('U', bound=Document) def no_op(self, _): return self QuerySet.__class_getitem__ = types.MethodType(no_op, QuerySet) class QuerySetManager(Generic[U]): def __get__(self, instance: object, cls: type[U]) -> QuerySet[U]: return QuerySet(cls, cls._get_collection()) JobsTotalByTypeStatusAndDatasetStatus = Mapping[tuple[str, str, str], int] JobsCountByWorkerSize = Mapping[str, int] class JobDict(TypedDict): type: str dataset: str revision: str config: Optional[str] split: Optional[str] unicity_id: str namespace: str priority: str status: str difficulty: int created_at: datetime started_at: Optional[datetime] last_heartbeat: Optional[datetime] class DumpByPendingStatus(TypedDict): waiting: list[JobDict] started: list[JobDict] class EmptyQueueError(Exception): pass class JobDoesNotExistError(DoesNotExist): pass class AlreadyStartedJobError(Exception): pass class LockTimeoutError(Exception): pass class NoWaitingJobError(Exception): pass class StartedJobError(Exception): pass class JobQueryFilters(TypedDict, total=False): difficulty__gt: int difficulty__lte: int dataset__nin: list[str] PA_SCHEMA = Schema({'_id': bson.ObjectId, 'type': pa.string(), 'dataset': pa.string(), 'revision': pa.string(), 'config': pa.string(), 'split': pa.string(), 'priority': pa.string(), 'status': pa.string(), 'created_at': pa.timestamp('ms'), 'difficulty': pa.int64()}) class JobDocument(Document): meta = {'collection': QUEUE_COLLECTION_JOBS, 'db_alias': QUEUE_MONGOENGINE_ALIAS, 'indexes': [('dataset', 'status'), ('type', 'dataset', 'status'), ('priority', 'status', 'created_at', 'namespace', 'difficulty', 'dataset', 'unicity_id'), ('priority', 'status', 'created_at', 'difficulty', 'dataset', 'namespace'), ('priority', 'status', 'type', 'namespace', 'unicity_id', 'created_at', '-difficulty'), 
('status', 'type'), ('unicity_id', 'status', '-created_at')]} type = StringField(required=True) dataset = StringField(required=True) revision = StringField(required=True) config = StringField() split = StringField() unicity_id = StringField(required=True) namespace = StringField(required=True) priority = EnumField(Priority, default=Priority.LOW) status = EnumField(Status, default=Status.WAITING) difficulty = IntField(required=True) created_at = DateTimeField(required=True) started_at = DateTimeField() last_heartbeat = DateTimeField() def to_dict(self) -> JobDict: return {'type': self.type, 'dataset': self.dataset, 'revision': self.revision, 'config': self.config, 'split': self.split, 'unicity_id': self.unicity_id, 'namespace': self.namespace, 'priority': self.priority.value, 'status': self.status.value, 'difficulty': self.difficulty, 'created_at': self.created_at, 'started_at': self.started_at, 'last_heartbeat': self.last_heartbeat} objects = QuerySetManager['JobDocument']() @classmethod def fetch_as_df(cls, query: Optional[Mapping[str, Any]]=None) -> pd.DataFrame: query = query if query is not None else {} collection = cls._get_collection() return find_pandas_all(collection, query, schema=PA_SCHEMA) def info(self) -> JobInfo: return JobInfo({'job_id': str(self.pk), 'type': self.type, 'params': {'dataset': self.dataset, 'revision': self.revision, 'config': self.config, 'split': self.split}, 'priority': self.priority, 'difficulty': self.difficulty, 'started_at': self.started_at}) @classmethod def get(cls, job_id: str) -> 'JobDocument': try: return cls.objects(pk=job_id).get() except DoesNotExist as e: raise JobDoesNotExistError(f'Job does not exist: job_id={job_id!r}') from e def flat_info(self) -> FlatJobInfo: return FlatJobInfo({'job_id': str(self.pk), 'type': self.type, 'dataset': self.dataset, 'revision': self.revision, 'config': self.config, 'split': self.split, 'priority': self.priority.value, 'status': self.status.value, 'difficulty': self.difficulty, 'created_at': self.created_at}) class Queue: def add_job(self, job_type: str, dataset: str, revision: str, difficulty: int, config: Optional[str]=None, split: Optional[str]=None, priority: Priority=Priority.LOW) -> JobDocument: increase_metric(dataset=dataset, job_type=job_type, status=Status.WAITING, difficulty=difficulty) return JobDocument(type=job_type, dataset=dataset, revision=revision, config=config, split=split, unicity_id=inputs_to_string(revision=revision, dataset=dataset, config=config, split=split, prefix=job_type), namespace=dataset.split('/')[0], priority=priority, created_at=get_datetime(), status=Status.WAITING, difficulty=difficulty).save() def create_jobs(self, job_infos: list[JobInfo]) -> int: try: jobs = [JobDocument(type=job_info['type'], dataset=job_info['params']['dataset'], revision=job_info['params']['revision'], config=job_info['params']['config'], split=job_info['params']['split'], unicity_id=inputs_to_string(revision=job_info['params']['revision'], dataset=job_info['params']['dataset'], config=job_info['params']['config'], split=job_info['params']['split'], prefix=job_info['type']), namespace=job_info['params']['dataset'].split('/')[0], priority=job_info['priority'], created_at=get_datetime(), status=Status.WAITING, difficulty=job_info['difficulty']) for job_info in job_infos] for job in jobs: increase_metric(dataset=job.dataset, job_type=job.type, status=Status.WAITING, difficulty=job.difficulty) job_ids = JobDocument.objects.insert(jobs, load_bulk=False) return len(job_ids) except Exception: return 0 def 
delete_waiting_jobs_by_job_id(self, job_ids: list[str]) -> int: try: existing = JobDocument.objects(pk__in=job_ids, status=Status.WAITING) for job in existing.all(): decrease_metric(job_type=job.type, status=job.status, difficulty=job.difficulty) deleted_jobs = existing.delete() return 0 if deleted_jobs is None else deleted_jobs except Exception: return 0 def _get_next_waiting_job_for_priority(self, priority: Priority, difficulty_min: Optional[int]=None, difficulty_max: Optional[int]=None) -> JobDocument: logging.debug(f'Getting next waiting job for priority {priority}') blocked_datasets = get_blocked_datasets() logging.debug(f'Blocked datasets: {blocked_datasets}') filters: JobQueryFilters = {} if difficulty_min is not None and difficulty_min > DEFAULT_DIFFICULTY_MIN: filters['difficulty__gt'] = difficulty_min if difficulty_max is not None and difficulty_max < DEFAULT_DIFFICULTY_MAX: filters['difficulty__lte'] = difficulty_max if blocked_datasets: filters['dataset__nin'] = blocked_datasets started_jobs = JobDocument.objects(status=Status.STARTED, **filters) logging.debug(f'Number of started jobs: {started_jobs.count()}') started_job_namespaces = [job.namespace for job in started_jobs.only('namespace')] logging.debug(f'Started job namespaces: {started_job_namespaces}') next_waiting_job = JobDocument.objects(status=Status.WAITING, namespace__nin=set(started_job_namespaces), priority=priority, **filters).order_by('+created_at').only('type', 'dataset', 'revision', 'config', 'split', 'priority', 'unicity_id').no_cache().first() if next_waiting_job is not None: return next_waiting_job logging.debug('No waiting job for namespace without started job') started_unicity_ids = {job.unicity_id for job in started_jobs.only('unicity_id')} descending_frequency_namespace_counts = [[namespace, count] for (namespace, count) in Counter(started_job_namespaces).most_common()] logging.debug(f'Descending frequency namespace counts: {descending_frequency_namespace_counts}') descending_frequency_namespace_groups = [[item[0] for item in data] for (_, data) in groupby(descending_frequency_namespace_counts, itemgetter(1))] while descending_frequency_namespace_groups: least_common_namespaces_group = descending_frequency_namespace_groups.pop() logging.debug(f'Least common namespaces group: {least_common_namespaces_group}') next_waiting_job = JobDocument.objects(status=Status.WAITING, namespace__in=least_common_namespaces_group, unicity_id__nin=started_unicity_ids, priority=priority, **filters).order_by('+created_at').only('type', 'dataset', 'revision', 'config', 'split', 'priority', 'unicity_id').no_cache().first() if next_waiting_job is not None: return next_waiting_job raise EmptyQueueError('no job available with the priority') def get_next_waiting_job(self, difficulty_min: Optional[int]=None, difficulty_max: Optional[int]=None) -> JobDocument: for priority in [Priority.HIGH, Priority.NORMAL, Priority.LOW]: with contextlib.suppress(EmptyQueueError): return self._get_next_waiting_job_for_priority(priority=priority, difficulty_min=difficulty_min, difficulty_max=difficulty_max) raise EmptyQueueError('no job available') def _start_newest_job_and_delete_others(self, job: JobDocument) -> JobDocument: RETRIES = 20 lock_owner = str(uuid4()) try: with lock(key=job.unicity_id, owner=lock_owner, sleeps=[0.1] * RETRIES, ttl=lock.TTL.LOCK_TTL_SECONDS_TO_START_JOB): waiting_jobs = JobDocument.objects(unicity_id=job.unicity_id).order_by('-created_at') datetime = get_datetime() num_started_jobs = 
waiting_jobs(status=Status.STARTED).count() if num_started_jobs > 0: if num_started_jobs > 1: logging.critical(f'job {job.unicity_id} has been started {num_started_jobs} times. Max is 1.') raise AlreadyStartedJobError(f'job {job.unicity_id} has been started by another worker') first_job = waiting_jobs.first() if not first_job: raise NoWaitingJobError(f'no waiting job could be found for {job.unicity_id}') if not JobDocument.objects(pk=str(first_job.pk), status=Status.WAITING).update(started_at=datetime, status=Status.STARTED, write_concern={'w': 'majority', 'fsync': True}, read_concern={'level': 'majority'}): raise AlreadyStartedJobError(f'job {job.unicity_id} has been started by another worker') update_metrics_for_type(dataset=first_job.dataset, job_type=first_job.type, previous_status=Status.WAITING, new_status=Status.STARTED, difficulty=first_job.difficulty) self.delete_waiting_jobs_by_job_id(job_ids=[job.pk for job in waiting_jobs if job.pk != first_job.pk]) return first_job.reload() except TimeoutError as err: raise LockTimeoutError(f'could not acquire the lock for job {job.unicity_id} after {RETRIES} retries.') from err def start_job(self, difficulty_min: Optional[int]=None, difficulty_max: Optional[int]=None) -> JobInfo: logging.debug('looking for a job to start') next_waiting_job = self.get_next_waiting_job(difficulty_min=difficulty_min, difficulty_max=difficulty_max) logging.debug(f'job found: {next_waiting_job}') started_job = self._start_newest_job_and_delete_others(job=next_waiting_job) return started_job.info() def get_job_with_id(self, job_id: str) -> JobDocument: return JobDocument.objects(pk=job_id).get() def get_job_type(self, job_id: str) -> str: job = self.get_job_with_id(job_id=job_id) return job.type def _get_started_job(self, job_id: str) -> JobDocument: job = JobDocument.objects(pk=job_id).get() if job.status is not Status.STARTED: raise StartedJobError(f'job {job.unicity_id} has a not the STARTED status ({job.status.value}).') if job.started_at is None: raise StartedJobError(f'job {job.unicity_id} has an empty started_at field.') return job def is_job_started(self, job_id: str) -> bool: try: self._get_started_job(job_id=job_id) except DoesNotExist: logging.error(f'job {job_id} does not exist.') return False except StartedJobError as e: logging.debug(f'job {job_id} has not the expected format for a started job: {e}') return False return True def finish_job(self, job_id: str) -> Optional[Priority]: try: job = self._get_started_job(job_id=job_id) except DoesNotExist: logging.error(f'job {job_id} does not exist. Aborting.') return None except StartedJobError as e: logging.error(f'job {job_id} has not the expected format for a started job. 
Aborting: {e}') return None decrease_metric(job_type=job.type, status=job.status, difficulty=job.difficulty) was_blocked = False if job.started_at is not None: was_blocked = create_past_job(dataset=job.dataset, started_at=pytz.UTC.localize(job.started_at), finished_at=get_datetime()) job_priority = job.priority job.delete() release_lock(key=job.unicity_id) if was_blocked: pending_jobs = self.get_pending_jobs_df(dataset=job.dataset) for (_, pending_job) in pending_jobs.iterrows(): decrease_worker_size_metrics(pending_job['difficulty']) return job_priority def delete_dataset_waiting_jobs(self, dataset: str) -> int: existing_waiting_jobs = JobDocument.objects(dataset=dataset, status=Status.WAITING) for job in existing_waiting_jobs.no_cache(): decrease_metric(job_type=job.type, status=job.status, difficulty=job.difficulty) release_lock(key=job.unicity_id) num_deleted_jobs = existing_waiting_jobs.delete() return 0 if num_deleted_jobs is None else num_deleted_jobs def is_job_in_process(self, job_type: str, dataset: str, revision: str, config: Optional[str]=None, split: Optional[str]=None) -> bool: return JobDocument.objects(type=job_type, dataset=dataset, revision=revision, config=config, split=split).count() > 0 def _get_df(self, jobs: list[FlatJobInfo]) -> pd.DataFrame: return pd.DataFrame({'job_id': pd.Series([job['job_id'] for job in jobs], dtype='str'), 'type': pd.Series([job['type'] for job in jobs], dtype='category'), 'dataset': pd.Series([job['dataset'] for job in jobs], dtype='str'), 'revision': pd.Series([job['revision'] for job in jobs], dtype='str'), 'config': pd.Series([job['config'] for job in jobs], dtype='str'), 'split': pd.Series([job['split'] for job in jobs], dtype='str'), 'priority': pd.Categorical([job['priority'] for job in jobs], ordered=True, categories=[Priority.LOW.value, Priority.NORMAL.value, Priority.HIGH.value]), 'status': pd.Categorical([job['status'] for job in jobs], ordered=True, categories=[Status.WAITING.value, Status.STARTED.value]), 'created_at': pd.Series([job['created_at'] for job in jobs], dtype='datetime64[ns]')}) def get_pending_jobs_df(self, dataset: str, job_types: Optional[list[str]]=None) -> pd.DataFrame: filters = {'dataset': dataset} if job_types: filters['type'] = {'$in': job_types} df = JobDocument.fetch_as_df(query=filters) df.rename(columns={'_id': 'job_id'}, inplace=True) df['priority'] = pd.Categorical(df['priority'], ordered=True, categories=[Priority.LOW.value, Priority.NORMAL.value, Priority.HIGH.value]) df['status'] = pd.Categorical(df['status'], ordered=True, categories=[Status.WAITING.value, Status.STARTED.value]) return df def has_pending_jobs(self, dataset: str, job_types: Optional[list[str]]=None) -> bool: filters = {} if job_types: filters['type__in'] = job_types return JobDocument.objects(**filters, dataset=dataset).count() > 0 def get_jobs_total_by_type_status_and_dataset_status(self) -> JobsTotalByTypeStatusAndDatasetStatus: blocked_datasets = get_blocked_datasets() return {(metric['job_type'], metric['status'], metric['dataset_status']): metric['total'] for metric in JobDocument.objects().aggregate([{'$sort': {'type': 1, 'status': 1}}, {'$addFields': {'dataset_status': {'$cond': {'if': {'$in': ['$dataset', blocked_datasets]}, 'then': DATASET_STATUS_BLOCKED, 'else': DATASET_STATUS_NORMAL}}}}, {'$group': {'_id': {'type': '$type', 'status': '$status', 'dataset_status': '$dataset_status'}, 'total': {'$sum': 1}}}, {'$project': {'job_type': '$_id.type', 'status': '$_id.status', 'dataset_status': '$_id.dataset_status', 'total': 
'$total'}}])} def get_jobs_count_by_worker_size(self) -> JobsCountByWorkerSize: blocked_datasets = get_blocked_datasets() return {WorkerSize.heavy.name: JobDocument.objects(dataset__nin=blocked_datasets, difficulty__lte=100, difficulty__gt=70).count(), WorkerSize.medium.name: JobDocument.objects(dataset__nin=blocked_datasets, difficulty__lte=70, difficulty__gt=40).count(), WorkerSize.light.name: JobDocument.objects(dataset__nin=blocked_datasets, difficulty__lte=40, difficulty__gt=0).count()} def get_dump_with_status(self, status: Status, job_type: str) -> list[JobDict]: return [d.to_dict() for d in JobDocument.objects(status=status.value, type=job_type)] def get_dump_by_pending_status(self, job_type: str) -> DumpByPendingStatus: return {'waiting': self.get_dump_with_status(job_type=job_type, status=Status.WAITING), 'started': self.get_dump_with_status(job_type=job_type, status=Status.STARTED)} def get_dataset_pending_jobs_for_type(self, dataset: str, job_type: str) -> list[JobDict]: return [d.to_dict() for d in JobDocument.objects(type=job_type, dataset=dataset)] def heartbeat(self, job_id: str) -> None: job = self.get_job_with_id(job_id) job.update(last_heartbeat=get_datetime()) def get_zombies(self, max_seconds_without_heartbeat: float) -> list[JobInfo]: started_jobs = JobDocument.objects(status=Status.STARTED) if max_seconds_without_heartbeat <= 0: return [] zombies = [job for job in started_jobs if job.last_heartbeat is not None and get_datetime() >= pytz.UTC.localize(job.last_heartbeat) + timedelta(seconds=max_seconds_without_heartbeat) or (job.last_heartbeat is None and job.started_at is not None and (get_datetime() >= pytz.UTC.localize(job.started_at) + timedelta(seconds=max_seconds_without_heartbeat)))] return [zombie.info() for zombie in zombies] # File: dataset-viewer-main/libs/libcommon/src/libcommon/queue/lock.py import contextlib import json import logging import time import types from collections.abc import Sequence from enum import IntEnum from types import TracebackType from typing import Generic, Literal, Optional, TypeVar from mongoengine import Document from mongoengine.errors import NotUniqueError from mongoengine.fields import DateTimeField, IntField, StringField from mongoengine.queryset.queryset import QuerySet from libcommon.constants import LOCK_TTL_SECONDS_NO_OWNER, LOCK_TTL_SECONDS_TO_START_JOB, LOCK_TTL_SECONDS_TO_WRITE_ON_GIT_BRANCH, QUEUE_COLLECTION_LOCKS, QUEUE_MONGOENGINE_ALIAS from libcommon.utils import get_datetime U = TypeVar('U', bound=Document) def no_op(self, _): return self QuerySet.__class_getitem__ = types.MethodType(no_op, QuerySet) class QuerySetManager(Generic[U]): def __get__(self, instance: object, cls: type[U]) -> QuerySet[U]: return QuerySet(cls, cls._get_collection()) class _TTL(IntEnum): LOCK_TTL_SECONDS_TO_START_JOB = LOCK_TTL_SECONDS_TO_START_JOB LOCK_TTL_SECONDS_TO_WRITE_ON_GIT_BRANCH = LOCK_TTL_SECONDS_TO_WRITE_ON_GIT_BRANCH class Lock(Document): meta = {'collection': QUEUE_COLLECTION_LOCKS, 'db_alias': QUEUE_MONGOENGINE_ALIAS, 'indexes': [('key', 'owner'), {'name': 'LOCK_TTL_SECONDS_NO_OWNER', 'fields': ['updated_at'], 'expireAfterSeconds': LOCK_TTL_SECONDS_NO_OWNER, 'partialFilterExpression': {'owner': None}}] + [{'name': ttl.name, 'fields': ['updated_at'], 'expireAfterSeconds': ttl, 'partialFilterExpression': {'ttl': ttl}} for ttl in _TTL]} key = StringField(primary_key=True) owner = StringField() ttl = IntField() job_id = StringField() created_at = DateTimeField() updated_at = DateTimeField() objects = QuerySetManager['Lock']() 
class lock(contextlib.AbstractContextManager['lock']): TTL = _TTL _default_sleeps = (0.05, 0.05, 0.05, 1, 1, 1, 5) def __init__(self, key: str, owner: str, sleeps: Sequence[float]=_default_sleeps, ttl: Optional[_TTL]=None) -> None: self.key = key self.owner = owner self.sleeps = sleeps self.ttl = ttl if ttl is not None and ttl not in list(self.TTL): raise ValueError(f'The TTL value is not supported by the TTL index. It should be one of {list(self.TTL)}') def acquire(self) -> None: for sleep in self.sleeps: try: Lock.objects(key=self.key, owner__in=[None, self.owner]).update(upsert=True, write_concern={'w': 'majority', 'fsync': True}, read_concern={'level': 'majority'}, owner=self.owner, updated_at=get_datetime(), ttl=self.ttl) return except NotUniqueError: logging.debug(f"Sleep {sleep}s to acquire lock '{self.key}' for owner='{self.owner}'") time.sleep(sleep) raise TimeoutError("lock couldn't be acquired") def release(self) -> None: Lock.objects(key=self.key, owner=self.owner).update(write_concern={'w': 'majority', 'fsync': True}, read_concern={'level': 'majority'}, owner=None, updated_at=get_datetime()) def __enter__(self) -> 'lock': self.acquire() return self def __exit__(self, exctype: Optional[type[BaseException]], excinst: Optional[BaseException], exctb: Optional[TracebackType]) -> Literal[False]: self.release() return False @classmethod def git_branch(cls, dataset: str, branch: str, owner: str, sleeps: Sequence[float]=_default_sleeps) -> 'lock': key = json.dumps({'dataset': dataset, 'branch': branch}) return cls(key=key, owner=owner, sleeps=sleeps, ttl=_TTL.LOCK_TTL_SECONDS_TO_WRITE_ON_GIT_BRANCH) def release_lock(key: str) -> None: Lock.objects(key=key).update(write_concern={'w': 'majority', 'fsync': True}, read_concern={'level': 'majority'}, owner=None, updated_at=get_datetime()) # File: dataset-viewer-main/libs/libcommon/src/libcommon/queue/metrics.py import types from typing import Generic, TypeVar from bson import ObjectId from mongoengine import Document from mongoengine.fields import DateTimeField, EnumField, IntField, ObjectIdField, StringField from mongoengine.queryset.queryset import QuerySet from libcommon.constants import QUEUE_MONGOENGINE_ALIAS, TYPE_STATUS_AND_DATASET_STATUS_JOB_COUNTS_COLLECTION, WORKER_TYPE_JOB_COUNTS_COLLECTION from libcommon.dtos import Status, WorkerSize from libcommon.queue.dataset_blockages import DATASET_STATUS_NORMAL, is_blocked from libcommon.utils import get_datetime U = TypeVar('U', bound=Document) def no_op(self, _): return self QuerySet.__class_getitem__ = types.MethodType(no_op, QuerySet) class QuerySetManager(Generic[U]): def __get__(self, instance: object, cls: type[U]) -> QuerySet[U]: return QuerySet(cls, cls._get_collection()) class StartedJobError(Exception): pass DEFAULT_INCREASE_AMOUNT = 1 DEFAULT_DECREASE_AMOUNT = -1 class JobTotalMetricDocument(Document): id = ObjectIdField(db_field='_id', primary_key=True, default=ObjectId) job_type = StringField(required=True, unique_with=['status', 'dataset_status']) status = StringField(required=True) dataset_status = StringField(required=True, default=DATASET_STATUS_NORMAL) total = IntField(required=True, default=0) created_at = DateTimeField(default=get_datetime) meta = {'collection': TYPE_STATUS_AND_DATASET_STATUS_JOB_COUNTS_COLLECTION, 'db_alias': QUEUE_MONGOENGINE_ALIAS, 'indexes': [('job_type', 'status', 'dataset_status')]} objects = QuerySetManager['JobTotalMetricDocument']() class WorkerSizeJobsCountDocument(Document): id = ObjectIdField(db_field='_id', primary_key=True, 
default=ObjectId) worker_size = EnumField(WorkerSize, required=True, unique=True) jobs_count = IntField(required=True, default=0) created_at = DateTimeField(default=get_datetime) @staticmethod def get_worker_size(difficulty: int) -> WorkerSize: if difficulty <= 40: return WorkerSize.light if difficulty <= 70: return WorkerSize.medium return WorkerSize.heavy meta = {'collection': WORKER_TYPE_JOB_COUNTS_COLLECTION, 'db_alias': QUEUE_MONGOENGINE_ALIAS, 'indexes': ['worker_size']} objects = QuerySetManager['WorkerSizeJobsCountDocument']() def _update_metrics(job_type: str, status: str, increase_by: int, dataset_status: str=DATASET_STATUS_NORMAL) -> None: JobTotalMetricDocument.objects(job_type=job_type, status=status, dataset_status=dataset_status).update(upsert=True, write_concern={'w': 'majority', 'fsync': True}, read_concern={'level': 'majority'}, inc__total=increase_by) def _update_worker_size_metrics(increase_by: int, difficulty: int) -> None: worker_size = WorkerSizeJobsCountDocument.get_worker_size(difficulty=difficulty) WorkerSizeJobsCountDocument.objects(worker_size=worker_size).update(upsert=True, write_concern={'w': 'majority', 'fsync': True}, read_concern={'level': 'majority'}, inc__jobs_count=increase_by) def increase_metric(dataset: str, job_type: str, status: str, difficulty: int) -> None: _update_metrics(job_type=job_type, status=status, increase_by=DEFAULT_INCREASE_AMOUNT) if status == Status.WAITING and (not is_blocked(dataset)): _update_worker_size_metrics(DEFAULT_INCREASE_AMOUNT, difficulty) def decrease_metric(job_type: str, status: str, difficulty: int) -> None: _update_metrics(job_type=job_type, status=status, increase_by=DEFAULT_DECREASE_AMOUNT) if status == Status.WAITING: _update_worker_size_metrics(DEFAULT_DECREASE_AMOUNT, difficulty) def decrease_worker_size_metrics(difficulty: int) -> None: _update_worker_size_metrics(DEFAULT_DECREASE_AMOUNT, difficulty) def update_metrics_for_type(dataset: str, job_type: str, previous_status: str, new_status: str, difficulty: int) -> None: if job_type is not None: decrease_metric(job_type=job_type, status=previous_status, difficulty=difficulty) increase_metric(dataset=dataset, job_type=job_type, status=new_status, difficulty=difficulty) # File: dataset-viewer-main/libs/libcommon/src/libcommon/queue/past_jobs.py import types from datetime import datetime from typing import Generic, TypeVar from mongoengine import Document from mongoengine.fields import DateTimeField, IntField, StringField from mongoengine.queryset.queryset import QuerySet from libcommon.constants import QUEUE_COLLECTION_PAST_JOBS, QUEUE_MONGOENGINE_ALIAS from libcommon.queue.dataset_blockages import block_dataset, is_blocked U = TypeVar('U', bound=Document) def no_op(self, _): return self QuerySet.__class_getitem__ = types.MethodType(no_op, QuerySet) class QuerySetManager(Generic[U]): def __get__(self, instance: object, cls: type[U]) -> QuerySet[U]: return QuerySet(cls, cls._get_collection()) MAX_MACHINES = 2 RATE_LIMIT_WINDOW_SECONDS = 1 * 60 * 60 DATASET_BLOCKAGE_THRESHOLD_SECONDS = MAX_MACHINES * RATE_LIMIT_WINDOW_SECONDS JOB_DURATION_CHECK_MIN_SECONDS = 30 JOB_DURATION_MIN_SECONDS = 30 class PastJobDocument(Document): meta = {'collection': QUEUE_COLLECTION_PAST_JOBS, 'db_alias': QUEUE_MONGOENGINE_ALIAS, 'indexes': [{'name': 'PAST_JOB_EXPIRE_AFTER_SECONDS_BLUE', 'fields': ['finished_at'], 'expireAfterSeconds': RATE_LIMIT_WINDOW_SECONDS}]} dataset = StringField(required=True) duration = IntField(required=True, min_value=0) finished_at = 
DateTimeField(required=True) objects = QuerySetManager['PastJobDocument']() def create_past_job(dataset: str, started_at: datetime, finished_at: datetime) -> bool: duration = int((finished_at - started_at).total_seconds()) if duration < JOB_DURATION_MIN_SECONDS: return False PastJobDocument(dataset=dataset, duration=duration, finished_at=finished_at).save() if not is_blocked(dataset) and duration > JOB_DURATION_CHECK_MIN_SECONDS: if PastJobDocument.objects(dataset=dataset).sum('duration') > DATASET_BLOCKAGE_THRESHOLD_SECONDS: block_dataset(dataset) return True return False # File: dataset-viewer-main/libs/libcommon/src/libcommon/queue/utils.py from .dataset_blockages import DatasetBlockageDocument from .jobs import JobDocument from .lock import Lock from .metrics import JobTotalMetricDocument, WorkerSizeJobsCountDocument from .past_jobs import PastJobDocument def _clean_queue_database() -> None: JobDocument.drop_collection() JobTotalMetricDocument.drop_collection() WorkerSizeJobsCountDocument.drop_collection() Lock.drop_collection() PastJobDocument.drop_collection() DatasetBlockageDocument.drop_collection() # File: dataset-viewer-main/libs/libcommon/src/libcommon/resources.py from dataclasses import dataclass, field from types import TracebackType from typing import Any, Optional, TypeVar from mongoengine.connection import ConnectionFailure, connect, disconnect from pymongo import MongoClient from pymongo.errors import ServerSelectionTimeoutError from libcommon.constants import CACHE_MONGOENGINE_ALIAS, QUEUE_MONGOENGINE_ALIAS T = TypeVar('T', bound='Resource') @dataclass class Resource: def __post_init__(self) -> None: self.allocate() def __enter__(self: T) -> T: return self def __exit__(self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType]) -> None: self.release() def allocate(self) -> None: pass def release(self) -> None: pass class MongoConnectionFailure(Exception): pass @dataclass class MongoResource(Resource): database: str host: str mongoengine_alias: str server_selection_timeout_ms: int = 30000 _client: MongoClient = field(init=False, repr=False) def allocate(self) -> None: try: self._client = connect(db=self.database, host=self.host, alias=self.mongoengine_alias, serverSelectionTimeoutMS=self.server_selection_timeout_ms) except ConnectionFailure as e: raise MongoConnectionFailure(f'Failed to connect to MongoDB: {e}') from e def is_available(self) -> bool: try: self._client.is_mongos return True except ServerSelectionTimeoutError: return False def create_collection(self, document: Any) -> None: document.ensure_indexes() def enable_pre_and_post_images(self, collection_name: str) -> None: self._client[self.database].command('collMod', collection_name, changeStreamPreAndPostImages={'enabled': True}) def release(self) -> None: disconnect(alias=self.mongoengine_alias) def __reduce__(self) -> tuple[Any, ...]: return (MongoResource, (self.database, self.host, self.mongoengine_alias, self.server_selection_timeout_ms)) @dataclass class CacheMongoResource(MongoResource): mongoengine_alias: str = field(default=CACHE_MONGOENGINE_ALIAS, init=False) @dataclass class QueueMongoResource(MongoResource): mongoengine_alias: str = field(default=QUEUE_MONGOENGINE_ALIAS, init=False) # File: dataset-viewer-main/libs/libcommon/src/libcommon/simple_cache.py import types from collections.abc import Mapping from datetime import date, datetime, time, timedelta from decimal import Decimal from http import HTTPStatus from typing import Any, Generic, 
NamedTuple, Optional, TypedDict, TypeVar, overload import pandas as pd import pyarrow as pa from bson import CodecOptions, ObjectId from bson.codec_options import TypeEncoder, TypeRegistry from bson.errors import InvalidId from mongoengine import Document from mongoengine.errors import DoesNotExist from mongoengine.fields import DateTimeField, DictField, EnumField, FloatField, IntField, ObjectIdField, StringField from mongoengine.queryset.queryset import QuerySet from pymongoarrow.api import Schema, find_pandas_all from libcommon.constants import CACHE_COLLECTION_RESPONSES, CACHE_METRICS_COLLECTION, CACHE_MONGOENGINE_ALIAS, ERROR_CODES_TO_RETRY from libcommon.dtos import JobParams from libcommon.utils import get_datetime class DateCodec(TypeEncoder): python_type = date transform_python = str class TimeCodec(TypeEncoder): python_type = time transform_python = str class TimedeltaCodec(TypeEncoder): python_type = timedelta transform_python = str class DecimalCodec(TypeEncoder): python_type = Decimal transform_python = str type_registry = TypeRegistry([DateCodec(), TimeCodec(), TimedeltaCodec(), DecimalCodec()]) U = TypeVar('U', bound=Document) def no_op(self, _): return self QuerySet.__class_getitem__ = types.MethodType(no_op, QuerySet) class QuerySetManager(Generic[U]): def __get__(self, instance: object, cls: type[U]) -> QuerySet[U]: codec_options = CodecOptions(type_registry=type_registry) cls._collection = cls._get_db().get_collection(cls._get_collection_name(), codec_options=codec_options) return QuerySet(cls, cls._get_collection()) class SplitFullName(NamedTuple): dataset: str config: Optional[str] split: Optional[str] PA_SCHEMA = Schema({'kind': pa.string(), 'dataset': pa.string(), 'config': pa.string(), 'split': pa.string(), 'http_status': pa.int32(), 'error_code': pa.string(), 'dataset_git_revision': pa.string(), 'job_runner_version': pa.int32(), 'progress': pa.float64(), 'updated_at': pa.timestamp('ms'), 'failed_runs': pa.int32()}) class CachedResponseDocument(Document): id = ObjectIdField(db_field='_id', primary_key=True, default=ObjectId) kind = StringField(required=True, unique_with=['dataset', 'config', 'split']) dataset = StringField(required=True) config = StringField() split = StringField() http_status = EnumField(HTTPStatus, required=True) error_code = StringField() content = DictField(required=True) dataset_git_revision = StringField(required=True) progress = FloatField(min_value=0.0, max_value=1.0) job_runner_version = IntField() failed_runs = IntField(default=0) details = DictField() updated_at = DateTimeField(default=get_datetime) duration = FloatField() meta = {'collection': CACHE_COLLECTION_RESPONSES, 'db_alias': CACHE_MONGOENGINE_ALIAS, 'indexes': [('kind', 'dataset', 'config', 'split'), ('dataset', 'kind', 'http_status'), ('kind', 'http_status', 'error_code'), ('kind', 'http_status', '_id'), ('kind', '_id'), ('details.cause_exception', 'error_code', 'details.copied_from_artifact'), ('error_code', 'kind', 'details.copied_from_artifact'), ('http_status', 'error_code', 'kind', 'updated_at')]} objects = QuerySetManager['CachedResponseDocument']() @classmethod def fetch_as_df(cls, query: Optional[Mapping[str, Any]]=None) -> pd.DataFrame: query = query if query is not None else {} collection = cls._get_collection() return find_pandas_all(collection, query, schema=PA_SCHEMA) DEFAULT_INCREASE_AMOUNT = 1 DEFAULT_DECREASE_AMOUNT = -1 class CacheTotalMetricDocument(Document): id = ObjectIdField(db_field='_id', primary_key=True, default=ObjectId) kind = 
StringField(required=True) http_status = IntField(required=True) error_code = StringField() total = IntField(required=True, default=0) created_at = DateTimeField(default=get_datetime) meta = {'collection': CACHE_METRICS_COLLECTION, 'db_alias': CACHE_MONGOENGINE_ALIAS, 'indexes': [{'fields': ['kind', 'http_status', 'error_code'], 'unique': True}]} objects = QuerySetManager['CacheTotalMetricDocument']() CachedResponseDocument.config.required = False CachedResponseDocument.split.required = False class CachedArtifactNotFoundError(Exception): kind: str dataset: str config: Optional[str] split: Optional[str] def __init__(self, kind: str, dataset: str, config: Optional[str], split: Optional[str]): super().__init__(f'Cache entry does not exist: kind={kind!r} dataset={dataset!r} config={config!r} split={split!r}') self.kind = kind self.dataset = dataset self.config = config self.split = split def _update_metrics(kind: str, http_status: HTTPStatus, increase_by: int, error_code: Optional[str]=None) -> None: CacheTotalMetricDocument.objects(kind=kind, http_status=http_status, error_code=error_code).upsert_one(inc__total=increase_by) def increase_metric(kind: str, http_status: HTTPStatus, error_code: Optional[str]=None) -> None: _update_metrics(kind=kind, http_status=http_status, error_code=error_code, increase_by=DEFAULT_INCREASE_AMOUNT) def decrease_metric(kind: str, http_status: HTTPStatus, error_code: Optional[str]=None) -> None: _update_metrics(kind=kind, http_status=http_status, error_code=error_code, increase_by=DEFAULT_DECREASE_AMOUNT) def decrease_metric_for_artifact(kind: str, dataset: str, config: Optional[str], split: Optional[str]) -> None: try: existing_cache = CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split).get() except DoesNotExist: return decrease_metric(kind=kind, http_status=existing_cache.http_status, error_code=existing_cache.error_code) def upsert_response(kind: str, dataset: str, dataset_git_revision: str, content: Mapping[str, Any], http_status: HTTPStatus, config: Optional[str]=None, split: Optional[str]=None, error_code: Optional[str]=None, details: Optional[Mapping[str, Any]]=None, job_runner_version: Optional[int]=None, progress: Optional[float]=None, updated_at: Optional[datetime]=None, duration: Optional[float]=None, failed_runs: int=0) -> None: decrease_metric_for_artifact(kind=kind, dataset=dataset, config=config, split=split) CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split).upsert_one(content=content, http_status=http_status, error_code=error_code, details=details, dataset_git_revision=dataset_git_revision, progress=progress, updated_at=updated_at or get_datetime(), job_runner_version=job_runner_version, failed_runs=failed_runs, duration=duration) increase_metric(kind=kind, http_status=http_status, error_code=error_code) def upsert_response_params(kind: str, job_params: JobParams, content: Mapping[str, Any], http_status: HTTPStatus, error_code: Optional[str]=None, details: Optional[Mapping[str, Any]]=None, job_runner_version: Optional[int]=None, progress: Optional[float]=None, updated_at: Optional[datetime]=None, duration: Optional[float]=None, failed_runs: int=0) -> None: (dataset, config, split, revision) = (job_params['dataset'], job_params['config'], job_params['split'], job_params['revision']) upsert_response(kind=kind, dataset=dataset, config=config, split=split, content=content, dataset_git_revision=revision, details=details, error_code=error_code, http_status=http_status, 
job_runner_version=job_runner_version, progress=progress, updated_at=updated_at, duration=duration, failed_runs=failed_runs) def delete_response(kind: str, dataset: str, config: Optional[str]=None, split: Optional[str]=None) -> Optional[int]: decrease_metric_for_artifact(kind=kind, dataset=dataset, config=config, split=split) return CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split).delete() def delete_dataset_responses(dataset: str) -> int: existing_cache = CachedResponseDocument.objects(dataset=dataset) for cache in existing_cache.no_cache(): decrease_metric(kind=cache.kind, http_status=cache.http_status, error_code=cache.error_code) num_deleted_cache_responses = existing_cache.delete() return 0 if num_deleted_cache_responses is None else num_deleted_cache_responses def update_revision_of_dataset_responses(dataset: str, old_revision: str, new_revision: str) -> int: existing_cache = CachedResponseDocument.objects(dataset=dataset, dataset_git_revision=old_revision) num_updated_cache_responses = existing_cache.update(dataset_git_revision=new_revision) return 0 if num_updated_cache_responses is None else num_updated_cache_responses T = TypeVar('T') @overload def _clean_nested_mongo_object(obj: dict[str, T]) -> dict[str, T]: ... @overload def _clean_nested_mongo_object(obj: list[T]) -> list[T]: ... @overload def _clean_nested_mongo_object(obj: T) -> T: ... def _clean_nested_mongo_object(obj: Any) -> Any: if isinstance(obj, dict): return {k: _clean_nested_mongo_object(v) for (k, v) in obj.items()} elif isinstance(obj, list): return [_clean_nested_mongo_object(v) for v in obj] elif isinstance(obj, tuple): return tuple((_clean_nested_mongo_object(v) for v in obj)) else: return obj class CacheEntryWithoutContent(TypedDict): http_status: HTTPStatus dataset_git_revision: str error_code: Optional[str] progress: Optional[float] job_runner_version: Optional[int] def get_response_without_content(kind: str, dataset: str, config: Optional[str]=None, split: Optional[str]=None) -> CacheEntryWithoutContent: try: response = CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split).only('http_status', 'error_code', 'job_runner_version', 'dataset_git_revision', 'progress').get() except DoesNotExist as e: raise CachedArtifactNotFoundError(kind=kind, dataset=dataset, config=config, split=split) from e return {'http_status': response.http_status, 'error_code': response.error_code, 'dataset_git_revision': response.dataset_git_revision, 'job_runner_version': response.job_runner_version, 'progress': response.progress} class CacheEntryMetadata(CacheEntryWithoutContent): updated_at: datetime failed_runs: int def get_response_metadata(kind: str, dataset: str, config: Optional[str]=None, split: Optional[str]=None) -> CacheEntryMetadata: try: response = CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split).only('http_status', 'error_code', 'job_runner_version', 'dataset_git_revision', 'progress', 'updated_at', 'failed_runs').get() except DoesNotExist as e: raise CachedArtifactNotFoundError(kind=kind, dataset=dataset, config=config, split=split) from e return {'http_status': response.http_status, 'error_code': response.error_code, 'dataset_git_revision': response.dataset_git_revision, 'job_runner_version': response.job_runner_version, 'progress': response.progress, 'updated_at': response.updated_at, 'failed_runs': response.failed_runs} class CacheEntry(CacheEntryWithoutContent): content: Mapping[str, Any] class 
CacheEntryWithDetails(CacheEntry): details: Mapping[str, str] class CachedArtifactError(Exception): kind: str dataset: str config: Optional[str] split: Optional[str] cache_entry_with_details: CacheEntryWithDetails enhanced_details: dict[str, Any] def __init__(self, message: str, kind: str, dataset: str, config: Optional[str], split: Optional[str], cache_entry_with_details: CacheEntryWithDetails): super().__init__(message) self.kind = kind self.dataset = dataset self.config = config self.split = split self.cache_entry_with_details = cache_entry_with_details self.enhanced_details: dict[str, Any] = dict(self.cache_entry_with_details['details'].items()) self.enhanced_details['copied_from_artifact'] = {'kind': self.kind, 'dataset': self.dataset, 'config': self.config, 'split': self.split} def get_response(kind: str, dataset: str, config: Optional[str]=None, split: Optional[str]=None) -> CacheEntry: try: response = CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split).only('content', 'http_status', 'error_code', 'job_runner_version', 'dataset_git_revision', 'progress').get() except DoesNotExist as e: raise CachedArtifactNotFoundError(kind=kind, dataset=dataset, config=config, split=split) from e return {'content': _clean_nested_mongo_object(response.content), 'http_status': response.http_status, 'error_code': response.error_code, 'job_runner_version': response.job_runner_version, 'dataset_git_revision': response.dataset_git_revision, 'progress': response.progress} def get_response_with_details(kind: str, dataset: str, config: Optional[str]=None, split: Optional[str]=None) -> CacheEntryWithDetails: try: response = CachedResponseDocument.objects(kind=kind, dataset=dataset, config=config, split=split).only('content', 'http_status', 'error_code', 'job_runner_version', 'dataset_git_revision', 'progress', 'details').get() except DoesNotExist as e: raise CachedArtifactNotFoundError(kind=kind, dataset=dataset, config=config, split=split) from e return {'content': _clean_nested_mongo_object(response.content), 'http_status': response.http_status, 'error_code': response.error_code, 'job_runner_version': response.job_runner_version, 'dataset_git_revision': response.dataset_git_revision, 'progress': response.progress, 'details': _clean_nested_mongo_object(response.details)} CACHED_RESPONSE_NOT_FOUND = 'CachedResponseNotFound' DATASET_GIT_REVISION_NOT_FOUND = 'dataset-git-revision-not-found' def get_previous_step_or_raise(kind: str, dataset: str, config: Optional[str]=None, split: Optional[str]=None) -> CacheEntryWithDetails: response = get_response_with_details(kind=kind, dataset=dataset, config=config, split=split) if response['http_status'] != HTTPStatus.OK: raise CachedArtifactError(message='The previous step failed.', kind=kind, dataset=dataset, config=config, split=split, cache_entry_with_details=response) return response def get_all_datasets() -> set[str]: return set(CachedResponseDocument.objects().distinct('dataset')) def get_datasets_with_retryable_errors() -> set[str]: return set(CachedResponseDocument.objects(error_code__in=ERROR_CODES_TO_RETRY).distinct('dataset')) def is_successful_response(kind: str, dataset: str, config: Optional[str]=None, split: Optional[str]=None) -> bool: return CachedResponseDocument.objects(dataset=dataset, config=config, split=split, kind=kind, http_status=HTTPStatus.OK).count() > 0 EntriesTotalByKindStatusAndErrorCode = Mapping[tuple[str, int, Optional[str]], int] def get_responses_count_by_kind_status_and_error_code() -> 
EntriesTotalByKindStatusAndErrorCode: return {(metric['kind'], metric['http_status'], metric['error_code']): metric['total'] for metric in CachedResponseDocument.objects().aggregate([{'$sort': {'kind': 1, 'http_status': 1, 'error_code': 1}}, {'$group': {'_id': {'kind': '$kind', 'http_status': '$http_status', 'error_code': '$error_code'}, 'total': {'$sum': 1}}}, {'$project': {'kind': '$_id.kind', 'http_status': '$_id.http_status', 'error_code': '$_id.error_code', 'total': '$total'}}])} class CacheReport(TypedDict): kind: str dataset: str dataset_git_revision: str config: Optional[str] split: Optional[str] http_status: int error_code: Optional[str] details: Mapping[str, Any] updated_at: datetime job_runner_version: Optional[int] progress: Optional[float] failed_runs: int class CacheReportsPage(TypedDict): cache_reports: list[CacheReport] next_cursor: str class InvalidCursor(Exception): pass class InvalidLimit(Exception): pass def get_cache_reports(kind: str, cursor: Optional[str], limit: int) -> CacheReportsPage: if not cursor: queryset = CachedResponseDocument.objects(kind=kind) else: try: queryset = CachedResponseDocument.objects(kind=kind, id__gt=ObjectId(cursor)) except InvalidId as err: raise InvalidCursor('Invalid cursor.') from err if limit <= 0: raise InvalidLimit('Invalid limit.') objects = list(queryset.order_by('+id').exclude('content').limit(limit)) return {'cache_reports': [{'kind': kind, 'dataset': object.dataset, 'config': object.config, 'split': object.split, 'http_status': object.http_status.value, 'error_code': object.error_code, 'details': _clean_nested_mongo_object(object.details), 'updated_at': object.updated_at, 'job_runner_version': object.job_runner_version, 'dataset_git_revision': object.dataset_git_revision, 'progress': object.progress, 'failed_runs': object.failed_runs} for object in objects], 'next_cursor': '' if len(objects) < limit else str(objects[-1].id)} def get_outdated_split_full_names_for_step(kind: str, current_version: int) -> list[SplitFullName]: responses = CachedResponseDocument.objects(kind=kind, job_runner_version__lt=current_version).only('dataset', 'config', 'split') return [SplitFullName(dataset=response.dataset, config=response.config, split=response.split) for response in responses] def get_dataset_responses_without_content_for_kind(kind: str, dataset: str) -> list[CacheReport]: responses = CachedResponseDocument.objects(kind=kind, dataset=dataset).exclude('content') return [{'kind': response.kind, 'dataset': response.dataset, 'config': response.config, 'split': response.split, 'http_status': response.http_status, 'error_code': response.error_code, 'details': _clean_nested_mongo_object(response.details), 'updated_at': response.updated_at, 'job_runner_version': response.job_runner_version, 'dataset_git_revision': response.dataset_git_revision, 'progress': response.progress, 'failed_runs': response.failed_runs} for response in responses] class CacheReportWithContent(CacheReport): content: Mapping[str, Any] class CacheReportsWithContentPage(TypedDict): cache_reports_with_content: list[CacheReportWithContent] next_cursor: str def get_cache_reports_with_content(kind: str, cursor: Optional[str], limit: int) -> CacheReportsWithContentPage: if not cursor: queryset = CachedResponseDocument.objects(kind=kind) else: try: queryset = CachedResponseDocument.objects(kind=kind, id__gt=ObjectId(cursor)) except InvalidId as err: raise InvalidCursor('Invalid cursor.') from err if limit <= 0: raise InvalidLimit('Invalid limit.') objects = 
list(queryset.order_by('+id').limit(limit)) return {'cache_reports_with_content': [{'kind': kind, 'dataset': object.dataset, 'config': object.config, 'split': object.split, 'http_status': object.http_status.value, 'error_code': object.error_code, 'content': _clean_nested_mongo_object(object.content), 'job_runner_version': object.job_runner_version, 'dataset_git_revision': object.dataset_git_revision, 'details': _clean_nested_mongo_object(object.details), 'updated_at': object.updated_at, 'progress': object.progress, 'failed_runs': object.failed_runs} for object in objects], 'next_cursor': '' if len(objects) < limit else str(objects[-1].id)} class CacheEntryFullMetadata(CacheEntryMetadata): kind: str dataset: str config: Optional[str] split: Optional[str] def _get_df(entries: list[CacheEntryFullMetadata]) -> pd.DataFrame: return pd.DataFrame({'kind': pd.Series([entry['kind'] for entry in entries], dtype='category'), 'dataset': pd.Series([entry['dataset'] for entry in entries], dtype='str'), 'config': pd.Series([entry['config'] for entry in entries], dtype='str'), 'split': pd.Series([entry['split'] for entry in entries], dtype='str'), 'http_status': pd.Series([entry['http_status'] for entry in entries], dtype='category'), 'error_code': pd.Series([entry['error_code'] for entry in entries], dtype='category'), 'dataset_git_revision': pd.Series([entry['dataset_git_revision'] for entry in entries], dtype='str'), 'job_runner_version': pd.Series([entry['job_runner_version'] for entry in entries], dtype=pd.Int16Dtype()), 'progress': pd.Series([entry['progress'] for entry in entries], dtype='float'), 'updated_at': pd.Series([entry['updated_at'] for entry in entries], dtype='datetime64[ns]'), 'failed_runs': pd.Series([entry['failed_runs'] for entry in entries], dtype=pd.Int16Dtype())}) def get_cache_entries_df(dataset: str, cache_kinds: Optional[list[str]]=None) -> pd.DataFrame: filters = {'dataset': dataset} if cache_kinds: filters['kind'] = {'$in': cache_kinds} return CachedResponseDocument.fetch_as_df(query=filters) def get_cache_count_for_dataset(dataset: str) -> int: return CachedResponseDocument.objects(dataset=dataset).count() def has_some_cache(dataset: str) -> bool: return get_cache_count_for_dataset(dataset) > 0 def fetch_names(dataset: str, config: Optional[str], cache_kind: str, names_field: str, name_field: str) -> list[str]: try: names = [] response = get_response_with_details(kind=cache_kind, dataset=dataset, config=config) for name_item in response['content'][names_field]: name = name_item[name_field] if not isinstance(name, str): raise ValueError(f'Invalid name: {name}, type should be str, got: {type(name)}') names.append(name) return names except Exception: return [] def get_datasets_with_last_updated_kind(kind: str, days: int) -> list[str]: pipeline = [{'$match': {'kind': kind, 'http_status': HTTPStatus.OK, 'updated_at': {'$gt': get_datetime(days=days)}}}, {'$sort': {'updated_at': 1}}, {'$group': {'_id': 0, 'datasets': {'$addToSet': '$dataset'}}}, {'$unwind': '$datasets'}] return [str(dataset['datasets']) for dataset in CachedResponseDocument.objects(kind=kind, http_status=HTTPStatus.OK, updated_at__gt=get_datetime(days=days)).aggregate(pipeline)] def _clean_cache_database() -> None: CachedResponseDocument.drop_collection() CacheTotalMetricDocument.drop_collection() # File: dataset-viewer-main/libs/libcommon/src/libcommon/state.py import logging from dataclasses import InitVar, dataclass, field from itertools import islice from typing import Optional import pandas as pd from 
libcommon.constants import CONFIG_SPLIT_NAMES_KIND, DATASET_CONFIG_NAMES_KIND, MAX_FAILED_RUNS_PER_ERROR_CODE from libcommon.processing_graph import Artifact, ProcessingGraph from libcommon.prometheus import StepProfiler from libcommon.simple_cache import CacheEntryMetadata, fetch_names class IncoherentCacheError(Exception): pass class UnexceptedConfigNamesError(IncoherentCacheError): pass class UnexceptedSplitNamesError(IncoherentCacheError): pass @dataclass class JobState: dataset: str revision: str config: Optional[str] split: Optional[str] job_type: str valid_pending_jobs_df: pd.DataFrame = field(init=False) is_in_process: bool = field(init=False) pending_jobs_df: InitVar[pd.DataFrame] def __post_init__(self, pending_jobs_df: pd.DataFrame) -> None: self.valid_pending_jobs_df = pending_jobs_df.sort_values(['status', 'priority', 'created_at'], ascending=[False, False, True]).head(1).copy() self.is_in_process = not self.valid_pending_jobs_df.empty @dataclass class CacheState: dataset: str config: Optional[str] split: Optional[str] cache_kind: str job_runner_version: int cache_entry_metadata: Optional[CacheEntryMetadata] = field(init=False) exists: bool = field(init=False) is_success: bool = field(init=False) cache_entries_df: InitVar[pd.DataFrame] def __post_init__(self, cache_entries_df: pd.DataFrame) -> None: if len(cache_entries_df) > 1: logging.warning(f'More than one cache entry found for {self.dataset}, {self.config}, {self.split}, {self.cache_kind}') if len(cache_entries_df) == 0: self.cache_entry_metadata = None else: entry = cache_entries_df.iloc[0].to_dict() self.cache_entry_metadata = CacheEntryMetadata(http_status=entry['http_status'], error_code=entry['error_code'], job_runner_version=entry['job_runner_version'], dataset_git_revision=entry['dataset_git_revision'], updated_at=entry['updated_at'], progress=entry['progress'], failed_runs=entry['failed_runs']) '' self.exists = self.cache_entry_metadata is not None self.is_success = self.cache_entry_metadata is not None and self.cache_entry_metadata['http_status'] < 400 def is_empty(self) -> bool: return self.cache_entry_metadata is None def is_error_to_retry(self) -> bool: return self.cache_entry_metadata is not None and (self.cache_entry_metadata['http_status'] >= 400 and self.cache_entry_metadata['error_code'] in MAX_FAILED_RUNS_PER_ERROR_CODE and (self.cache_entry_metadata['failed_runs'] < MAX_FAILED_RUNS_PER_ERROR_CODE[self.cache_entry_metadata['error_code']])) def is_older_than(self, other: 'CacheState') -> bool: if self.cache_entry_metadata is None or other.cache_entry_metadata is None: return False return self.cache_entry_metadata['updated_at'] < other.cache_entry_metadata['updated_at'] def is_git_revision_different_from(self, git_revision: Optional[str]) -> bool: return self.cache_entry_metadata is None or self.cache_entry_metadata['dataset_git_revision'] != git_revision def is_job_runner_obsolete(self) -> bool: if self.cache_entry_metadata is None: return False if self.cache_entry_metadata['job_runner_version'] is None: return True return self.cache_entry_metadata['job_runner_version'] < self.job_runner_version @dataclass class ArtifactState(Artifact): job_state: JobState = field(init=False) cache_state: CacheState = field(init=False) pending_jobs_df: InitVar[pd.DataFrame] cache_entries_df: InitVar[pd.DataFrame] def __post_init__(self, pending_jobs_df: pd.DataFrame, cache_entries_df: pd.DataFrame) -> None: super().__post_init__() self.job_state = JobState(job_type=self.processing_step.job_type, dataset=self.dataset, 
revision=self.revision, config=self.config, split=self.split, pending_jobs_df=pending_jobs_df) self.cache_state = CacheState(cache_kind=self.processing_step.cache_kind, dataset=self.dataset, config=self.config, split=self.split, job_runner_version=self.processing_step.job_runner_version, cache_entries_df=cache_entries_df) @dataclass class SplitState: dataset: str revision: str config: str split: str processing_graph: ProcessingGraph artifact_state_by_step: dict[str, ArtifactState] = field(init=False) pending_jobs_df: InitVar[pd.DataFrame] cache_entries_df: InitVar[pd.DataFrame] def __post_init__(self, pending_jobs_df: pd.DataFrame, cache_entries_df: pd.DataFrame) -> None: self.artifact_state_by_step = {processing_step.name: ArtifactState(processing_step=processing_step, dataset=self.dataset, revision=self.revision, config=self.config, split=self.split, pending_jobs_df=pending_jobs_df[pending_jobs_df['type'] == processing_step.job_type], cache_entries_df=cache_entries_df[cache_entries_df['kind'] == processing_step.cache_kind]) for processing_step in self.processing_graph.get_input_type_processing_steps(input_type='split')} @dataclass class ConfigState: dataset: str revision: str config: str processing_graph: ProcessingGraph split_names: list[str] = field(init=False) split_states: list[SplitState] = field(init=False) artifact_state_by_step: dict[str, ArtifactState] = field(init=False) pending_jobs_df: InitVar[pd.DataFrame] cache_entries_df: InitVar[pd.DataFrame] def __post_init__(self, pending_jobs_df: pd.DataFrame, cache_entries_df: pd.DataFrame) -> None: with StepProfiler(method='ConfigState.__post_init__', step='get_config_level_artifact_states'): self.artifact_state_by_step = {processing_step.name: ArtifactState(processing_step=processing_step, dataset=self.dataset, revision=self.revision, config=self.config, split=None, pending_jobs_df=pending_jobs_df[pending_jobs_df['split'].isnull() & (pending_jobs_df['type'] == processing_step.job_type)], cache_entries_df=cache_entries_df[cache_entries_df['kind'] == processing_step.cache_kind]) for processing_step in self.processing_graph.get_input_type_processing_steps(input_type='config')} with StepProfiler(method='ConfigState.__post_init__', step='get_split_names'): self.split_names = fetch_names(dataset=self.dataset, config=self.config, cache_kind=CONFIG_SPLIT_NAMES_KIND, names_field='splits', name_field='split') unexpected_split_names = set(cache_entries_df['split'].unique()).difference(set(self.split_names).union({None})) if unexpected_split_names: raise UnexceptedSplitNamesError(f"Unexpected split names for dataset={self.dataset} config={self.config} ({len(unexpected_split_names)}): {list(islice(unexpected_split_names, 10))}{('' if len(unexpected_split_names) <= 10 else '...')}") with StepProfiler(method='ConfigState.__post_init__', step='get_split_states'): self.split_states = [SplitState(dataset=self.dataset, revision=self.revision, config=self.config, split=split_name, processing_graph=self.processing_graph, pending_jobs_df=pending_jobs_df[pending_jobs_df['split'] == split_name], cache_entries_df=cache_entries_df[cache_entries_df['split'] == split_name]) for split_name in self.split_names] @dataclass class DatasetState: dataset: str revision: str processing_graph: ProcessingGraph config_names: list[str] = field(init=False) config_states: list[ConfigState] = field(init=False) artifact_state_by_step: dict[str, ArtifactState] = field(init=False) pending_jobs_df: InitVar[pd.DataFrame] cache_entries_df: InitVar[pd.DataFrame] def 
__post_init__(self, pending_jobs_df: pd.DataFrame, cache_entries_df: pd.DataFrame) -> None: with StepProfiler(method='DatasetState.__post_init__', step='get_dataset_level_artifact_states'): self.artifact_state_by_step = {processing_step.name: ArtifactState(processing_step=processing_step, dataset=self.dataset, revision=self.revision, config=None, split=None, pending_jobs_df=pending_jobs_df[(pending_jobs_df['revision'] == self.revision) & pending_jobs_df['config'].isnull() & pending_jobs_df['split'].isnull() & (pending_jobs_df['type'] == processing_step.job_type)], cache_entries_df=cache_entries_df[(cache_entries_df['kind'] == processing_step.cache_kind) & cache_entries_df['config'].isnull() & cache_entries_df['split'].isnull()]) for processing_step in self.processing_graph.get_input_type_processing_steps(input_type='dataset')} with StepProfiler(method='DatasetState.__post_init__', step='get_config_names'): self.config_names = fetch_names(dataset=self.dataset, config=None, cache_kind=DATASET_CONFIG_NAMES_KIND, names_field='config_names', name_field='config') unexpected_config_names = set(cache_entries_df['config'].unique()).difference(set(self.config_names).union({None})) if unexpected_config_names: raise UnexceptedConfigNamesError(f"Unexpected config names ({len(unexpected_config_names)}): {list(islice(unexpected_config_names, 10))}{('' if len(unexpected_config_names) <= 10 else '...')}") with StepProfiler(method='DatasetState.__post_init__', step='get_config_states'): self.config_states = [ConfigState(dataset=self.dataset, revision=self.revision, config=config_name, processing_graph=self.processing_graph, pending_jobs_df=pending_jobs_df[(pending_jobs_df['revision'] == self.revision) & (pending_jobs_df['config'] == config_name)], cache_entries_df=cache_entries_df[cache_entries_df['config'] == config_name]) for config_name in self.config_names] @dataclass class FirstStepsDatasetState(DatasetState): def __post_init__(self, pending_jobs_df: pd.DataFrame, cache_entries_df: pd.DataFrame) -> None: with StepProfiler(method='FirstStepsDatasetState.__post_init__', step='get_dataset_level_artifact_states'): self.artifact_state_by_step = {processing_step.name: ArtifactState(processing_step=processing_step, dataset=self.dataset, revision=self.revision, config=None, split=None, pending_jobs_df=pending_jobs_df[(pending_jobs_df['revision'] == self.revision) & pending_jobs_df['config'].isnull() & pending_jobs_df['split'].isnull() & (pending_jobs_df['type'] == processing_step.job_type)], cache_entries_df=cache_entries_df[(cache_entries_df['kind'] == processing_step.cache_kind) & cache_entries_df['config'].isnull() & cache_entries_df['split'].isnull()]) for processing_step in self.processing_graph.get_first_processing_steps()} self.config_names = [] self.config_states = [] # File: dataset-viewer-main/libs/libcommon/src/libcommon/storage.py import logging import os import shutil from datetime import datetime, timedelta from os import PathLike, makedirs from pathlib import Path from typing import Optional, Union from appdirs import user_cache_dir from libcommon.constants import DESCRIPTIVE_STATISTICS_CACHE_APPNAME, DUCKDB_INDEX_CACHE_APPNAME, HF_DATASETS_CACHE_APPNAME, PARQUET_METADATA_CACHE_APPNAME StrPath = Union[str, PathLike[str]] def init_dir(directory: Optional[StrPath]=None, appname: Optional[str]=None) -> StrPath: if directory is None: directory = user_cache_dir(appname=appname) logging.debug(f'Directory defaulting to user-specific cache: {directory}') makedirs(directory, exist_ok=True) 
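# --- Hypothetical usage sketch (not part of the repository) ---------------
# A minimal illustration of how the CacheState dataclass from libcommon/state.py
# (defined above) interprets a single row of the cache-entries DataFrame that
# simple_cache.get_cache_entries_df returns. All values below are invented for
# the example, and the cache kind / error code are placeholders; it assumes
# libcommon and pandas are importable.
from datetime import datetime, timezone

import pandas as pd

from libcommon.state import CacheState

cache_entries_df = pd.DataFrame([{
    'kind': 'config-split-names',          # placeholder cache kind
    'dataset': 'user/my-dataset',          # placeholder dataset name
    'config': 'default',
    'split': None,
    'http_status': 500,
    'error_code': 'CreateCommitError',     # assumed to appear in MAX_FAILED_RUNS_PER_ERROR_CODE
    'dataset_git_revision': 'abc123',
    'job_runner_version': 3,
    'progress': None,
    'updated_at': datetime.now(timezone.utc),
    'failed_runs': 1,
}])

cache_state = CacheState(
    dataset='user/my-dataset',
    config='default',
    split=None,
    cache_kind='config-split-names',
    job_runner_version=4,
    cache_entries_df=cache_entries_df,
)
print(cache_state.exists)                                     # True: one matching entry was found
print(cache_state.is_success)                                 # False: http_status is >= 400
print(cache_state.is_job_runner_obsolete())                   # True: entry written by an older job runner (3 < 4)
print(cache_state.is_git_revision_different_from('def456'))   # True: cached revision differs
print(cache_state.is_error_to_retry())                        # True only if the error code is retryable and under its max failed runs
# --------------------------------------------------------------------------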
logging.debug(f'Directory created at: {directory}') return directory def init_parquet_metadata_dir(directory: Optional[StrPath]=None) -> StrPath: return init_dir(directory, appname=PARQUET_METADATA_CACHE_APPNAME) def init_duckdb_index_cache_dir(directory: Optional[StrPath]=None) -> StrPath: return init_dir(directory, appname=DUCKDB_INDEX_CACHE_APPNAME) def init_hf_datasets_cache_dir(directory: Optional[StrPath]=None) -> StrPath: return init_dir(directory, appname=HF_DATASETS_CACHE_APPNAME) def init_statistics_cache_dir(directory: Optional[StrPath]=None) -> StrPath: return init_dir(directory, appname=DESCRIPTIVE_STATISTICS_CACHE_APPNAME) def exists(path: StrPath) -> bool: return Path(path).exists() def remove_dir(directory: StrPath) -> None: shutil.rmtree(directory, ignore_errors=True) logging.debug(f'Directory removed: {directory}') def clean_dir(root_folder: StrPath, expired_time_interval_seconds: int) -> None: logging.info(f'looking for all files and directories under {root_folder} to delete files with last accessed time before {expired_time_interval_seconds} seconds ago or is empty folder') now = datetime.now().replace(tzinfo=None) errors = 0 total_dirs = 0 total_files = 0 total_disappeared_paths = 0 for (root, _, files) in os.walk(root_folder, topdown=False): try: for name in files: path = os.path.join(root, name) last_access_time_value = os.path.getatime(path) last_access_datetime = datetime.fromtimestamp(last_access_time_value).replace(tzinfo=None) if last_access_datetime + timedelta(seconds=expired_time_interval_seconds) <= now: logging.info(f'deleting file path={path!r} last_access_datetime={last_access_datetime!r}') os.remove(path) total_files += 1 try: os.rmdir(root) logging.info(f'deleting directory root={root!r} because it was empty') total_dirs += 1 except OSError: pass except FileNotFoundError: logging.error(f'failed to delete path={path!r} because it has disappeared during the loop') total_disappeared_paths += 1 if total_files: logging.info(f'clean_directory removed {total_files} files at the root of the cache directory.') if total_disappeared_paths: logging.info(f'clean_directory failed to delete {total_disappeared_paths} paths because they disappeared during the loop.') logging.info(f'clean_directory removed {total_dirs - errors} directories at the root of the cache directory.') # File: dataset-viewer-main/libs/libcommon/src/libcommon/storage_client.py import logging from typing import Optional, Union from urllib import parse import fsspec from fsspec.implementations.local import LocalFileSystem from s3fs import S3FileSystem from libcommon.config import S3Config, StorageProtocol from libcommon.constants import DATASET_SEPARATOR from libcommon.url_preparator import URLPreparator class StorageClientInitializeError(Exception): pass class StorageClient: _fs: Union[LocalFileSystem, S3FileSystem] protocol: StorageProtocol storage_root: str base_url: str overwrite: bool url_preparator: Optional[URLPreparator] = None def __init__(self, protocol: StorageProtocol, storage_root: str, base_url: str, overwrite: bool=False, s3_config: Optional[S3Config]=None, url_preparator: Optional[URLPreparator]=None) -> None: logging.info(f'trying to initialize storage client with protocol={protocol!r} storage_root={storage_root!r} base_url={base_url!r} overwrite={overwrite!r}') self.storage_root = storage_root self.protocol = protocol self.base_url = base_url self.overwrite = overwrite self.url_preparator = url_preparator if protocol == 's3': if not s3_config: raise StorageClientInitializeError('s3 
config is required') self._fs = fsspec.filesystem(protocol, key=s3_config.access_key_id, secret=s3_config.secret_access_key, client_kwargs={'region_name': s3_config.region_name}, max_paths=100) elif protocol == 'file': self._fs = fsspec.filesystem(protocol, auto_mkdir=True) else: raise StorageClientInitializeError('unsupported protocol') self._validate() def _validate(self) -> None: try: self._fs.ls(self.storage_root) except FileNotFoundError: self._fs.mkdir(self.storage_root) except Exception as e: raise StorageClientInitializeError('error when trying to initialize client', e) def get_full_path(self, path: str) -> str: return f'{self.storage_root}/{path}' def exists(self, path: str) -> bool: return bool(self._fs.exists(self.get_full_path(path))) def get_url(self, path: str, revision: str) -> str: return self.prepare_url(self.get_unprepared_url(path), revision=revision) def get_unprepared_url(self, path: str) -> str: url = f'{self.base_url}/{path}' logging.debug(f'unprepared url: {url}') return url def prepare_url(self, url: str, revision: str) -> str: if self.url_preparator: url = self.url_preparator.prepare_url(url=url, revision=revision) logging.debug(f'prepared url: {url}') return url def delete_dataset_directory(self, dataset: str) -> int: dataset_key = self.get_full_path(dataset) try: self._fs.rm(dataset_key, recursive=True) logging.info(f'Directory deleted: {dataset_key}') return 1 except FileNotFoundError: return 0 except Exception: logging.warning(f'Could not delete directory {dataset_key}') return 0 def update_revision_of_dataset_revision_directory(self, dataset: str, old_revision: str, new_revision: str) -> int: old_dataset_revision_key = self.get_full_path(f'{parse.quote(dataset)}/{DATASET_SEPARATOR}/{old_revision}') new_dataset_revision_key = self.get_full_path(f'{parse.quote(dataset)}/{DATASET_SEPARATOR}/{new_revision}') try: self._fs.mv(old_dataset_revision_key, new_dataset_revision_key, recursive=True) logging.info(f'Revision of the directory updated: {old_dataset_revision_key} -> {new_dataset_revision_key}') return 1 except Exception: logging.warning(f'Could not update the revision of directory {old_dataset_revision_key} to {new_dataset_revision_key}') return 0 @staticmethod def generate_object_key(dataset: str, revision: str, config: str, split: str, row_idx: int, column: str, filename: str) -> str: return f'{parse.quote(dataset)}/{DATASET_SEPARATOR}/{revision}/{DATASET_SEPARATOR}/{parse.quote(config)}/{parse.quote(split)}/{str(row_idx)}/{parse.quote(column)}/{filename}' def __str__(self) -> str: return f'StorageClient(protocol={self.protocol}, storage_root={self.storage_root}, base_url={self.base_url}, overwrite={self.overwrite}, url_preparator={self.url_preparator})' # File: dataset-viewer-main/libs/libcommon/src/libcommon/url_preparator.py from abc import ABC from collections.abc import Mapping from dataclasses import dataclass from typing import Any, Callable, Literal, Optional, Union from datasets import Audio, Features, Image from datasets.features.features import FeatureType, Sequence from libcommon.cloudfront import CloudFrontSigner from libcommon.dtos import FeatureItem from libcommon.viewer_utils.asset import replace_dataset_git_revision_placeholder class InvalidFirstRowsError(ValueError): pass VisitPath = list[Union[str, Literal[0]]] @dataclass class AssetUrlPath: feature_type: Literal['Audio', 'Image'] path: VisitPath def enter(self) -> 'AssetUrlPath': if len(self.path) == 0: raise ValueError('Cannot enter an empty path') return 
AssetUrlPath(feature_type=self.feature_type, path=self.path[1:]) def to_features_dict(features: list[FeatureItem]) -> Features: return Features({feature_item['name']: feature_item['type'] for feature_item in features}) def _visit(feature: FeatureType, func: Callable[[FeatureType, VisitPath], Optional[FeatureType]], visit_path: VisitPath=[]) -> FeatureType: if isinstance(feature, Sequence) and isinstance(feature.feature, dict): feature = {k: [f] for (k, f) in feature.feature.items()} if isinstance(feature, dict): out = func({k: _visit(f, func, visit_path + [k]) for (k, f) in feature.items()}, visit_path) elif isinstance(feature, (list, tuple)): out = func([_visit(feature[0], func, visit_path + [0])], visit_path) elif isinstance(feature, Sequence): out = func(Sequence(_visit(feature.feature, func, visit_path + [0]), length=feature.length), visit_path) else: out = func(feature, visit_path) return feature if out is None else out def get_asset_url_paths(features: Features) -> list[AssetUrlPath]: asset_url_paths: list[AssetUrlPath] = [] for (column, feature) in features.items(): def classify(feature: FeatureType, visit_path: VisitPath) -> None: if isinstance(feature, Image): asset_url_paths.append(AssetUrlPath(feature_type='Image', path=visit_path)) elif isinstance(feature, Audio): asset_url_paths.append(AssetUrlPath(feature_type='Audio', path=visit_path + [0])) _visit(feature, classify, [column]) return asset_url_paths class URLPreparator(ABC): def __init__(self, url_signer: Optional[CloudFrontSigner]) -> None: self.url_signer = url_signer def prepare_url(self, url: str, revision: str) -> str: url = replace_dataset_git_revision_placeholder(url, revision) if self.url_signer: url = self.url_signer.sign_url(url) return url def __str__(self) -> str: return f'{self.__class__.__name__}(url_signer={self.url_signer})' def _prepare_asset_url_path_in_place(self, cell: Any, asset_url_path: AssetUrlPath, revision: str) -> Any: if not cell: return cell elif len(asset_url_path.path) == 0: if not isinstance(cell, dict): raise InvalidFirstRowsError('Expected the cell to be a dict') src = cell.get('src') if not isinstance(src, str): raise InvalidFirstRowsError('Expected cell["src"] to be a string') cell['src'] = self.prepare_url(src, revision=revision) else: key = asset_url_path.path[0] if key == 0: if not isinstance(cell, list): raise InvalidFirstRowsError('Expected the cell to be a list') for cell_item in cell: self._prepare_asset_url_path_in_place(cell=cell_item, asset_url_path=asset_url_path.enter(), revision=revision) else: if not isinstance(cell, dict): raise InvalidFirstRowsError('Expected the cell to be a dict') self._prepare_asset_url_path_in_place(cell=cell[key], asset_url_path=asset_url_path.enter(), revision=revision) def _get_asset_url_paths_from_first_rows(self, first_rows: Mapping[str, Any]) -> list[AssetUrlPath]: features_list = first_rows.get('features') if not isinstance(features_list, list): raise InvalidFirstRowsError('Expected response["features"] a list') features_dict = to_features_dict(features_list) features = Features.from_dict(features_dict) return get_asset_url_paths(features) def prepare_urls_in_first_rows_in_place(self, first_rows: Mapping[str, Any], revision: str) -> None: asset_url_paths = self._get_asset_url_paths_from_first_rows(first_rows=first_rows) if not asset_url_paths: return row_items = first_rows.get('rows') if not isinstance(row_items, list): raise InvalidFirstRowsError('Expected response["rows"] to be a list') for row_item in row_items: if not isinstance(row_item, 
dict): raise InvalidFirstRowsError('Expected response["rows"][i] to be a dict') truncated_cells = row_item.get('truncated_cells') if not isinstance(truncated_cells, list) or not all((isinstance(cell, str) for cell in truncated_cells)): raise InvalidFirstRowsError('Expected response["rows"][i]["truncated_cells"] to be a list of strings') row = row_item.get('row') if not isinstance(row, dict): raise InvalidFirstRowsError('Expected response["rows"][i]["row"] to be a dict') for asset_url_path in asset_url_paths: if isinstance(asset_url_path.path[0], str) and asset_url_path.path[0] in truncated_cells: continue self._prepare_asset_url_path_in_place(cell=row, asset_url_path=asset_url_path, revision=revision) # File: dataset-viewer-main/libs/libcommon/src/libcommon/utils.py import base64 import functools import logging import mimetypes import time from collections.abc import Callable, Sequence from datetime import datetime, timedelta, timezone from fnmatch import fnmatch from pathlib import Path from typing import Any, Optional, TypeVar, Union, cast import orjson import pandas as pd import pytz from huggingface_hub import constants, hf_hub_download from requests.exceptions import ReadTimeout from libcommon.exceptions import DatasetInBlockListError def orjson_default(obj: Any) -> Any: if isinstance(obj, bytes): return base64.b64encode(obj).decode('utf-8') if isinstance(obj, pd.Timestamp): return obj.to_pydatetime() return str(obj) def orjson_dumps(content: Any) -> bytes: return orjson.dumps(content, option=orjson.OPT_UTC_Z | orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NON_STR_KEYS, default=orjson_default) def get_json_size(obj: Any) -> int: return len(orjson_dumps(obj)) def utf8_lead_byte(b: int) -> bool: return b & 192 != 128 class SmallerThanMaxBytesError(Exception): pass def serialize_and_truncate(obj: Any, max_bytes: int) -> str: serialized_bytes = orjson_dumps(obj) if len(serialized_bytes) <= max_bytes: raise SmallerThanMaxBytesError() i = max_bytes while i > 0 and (not utf8_lead_byte(serialized_bytes[i])): i -= 1 return serialized_bytes[:i].decode('utf8', 'ignore') def get_datetime(days: Optional[float]=None) -> datetime: date = datetime.now(timezone.utc) if days is not None: date = date - timedelta(days=days) return date def get_duration(started_at: datetime) -> float: started_at = pytz.UTC.localize(started_at) if not started_at.tzinfo else started_at return (get_datetime() - started_at).total_seconds() def get_duration_or_none(started_at: Optional[datetime]) -> Optional[float]: return get_duration(started_at) if started_at else None def get_expires(seconds: float) -> datetime: return datetime.now(timezone.utc) + timedelta(seconds=seconds) def inputs_to_string(dataset: str, revision: str, config: Optional[str]=None, split: Optional[str]=None, prefix: Optional[str]=None) -> str: result = f'{dataset},{revision}' if config is not None: result = f'{result},{config}' if split is not None: result = f'{result},{split}' if prefix is not None: result = f'{prefix},{result}' return result def is_image_url(text: str) -> bool: is_url = text.startswith('https://') or text.startswith('http://') (mime_type, _) = mimetypes.guess_type(text.split('/')[-1].split('?')[0]) return is_url and mime_type is not None and mime_type.startswith('image/') def raise_if_blocked(dataset: str, blocked_datasets: list[str]) -> None: for blocked_dataset in blocked_datasets: parts = blocked_dataset.split('/') if len(parts) > 2 or not blocked_dataset: raise ValueError('The dataset name is not valid. 
It should be a namespace (user or an organization) and a repo name separated by a `/`, or a simple repo name for canonical datasets.') if '*' in parts[0]: raise ValueError('The namespace name, or the canonical dataset name, cannot contain a wildcard.') if fnmatch(dataset, blocked_dataset): raise DatasetInBlockListError('This dataset has been disabled for now. Please open an issue in https://github.com/huggingface/dataset-viewer if you want this dataset to be supported.') FuncT = TypeVar('FuncT', bound=Callable[..., Any]) RETRY_SLEEPS = (1, 1, 1, 10, 10, 10, 60, 60, 60, 10 * 60) RETRY_ON: tuple[type[Exception]] = (Exception,) class retry: def __init__(self, sleeps: Sequence[float]=RETRY_SLEEPS, on: Sequence[type[Exception]]=RETRY_ON) -> None: self.sleeps = sleeps self.on = on def __call__(self, func: FuncT) -> FuncT: @functools.wraps(func) def decorator(*args: Any, **kwargs: Any) -> Any: attempt = 0 last_err = None while attempt < len(self.sleeps): try: '' duration = self.sleeps[attempt] logging.debug(f'Sleep during {duration} seconds to preventively mitigate rate limiting.') time.sleep(duration) return func(*args, **kwargs) except tuple(self.on) as err: logging.info(f"Got a {type(err).__name__}. Let's retry.") last_err = err attempt += 1 raise RuntimeError(f'Give up after {attempt} attempts. The last one raised {type(last_err)}') from last_err return cast(FuncT, decorator) HF_HUB_HTTP_ERROR_RETRY_SLEEPS = [1, 1, 1, 10, 10, 10] def download_file_from_hub(repo_type: str, revision: str, repo_id: str, filename: str, local_dir: Union[str, Path], hf_token: Optional[str], cache_dir: Union[str, Path, None]=None, force_download: bool=False, resume_download: bool=False) -> None: logging.debug(f'Using {constants.HF_HUB_ENABLE_HF_TRANSFER} for hf_transfer') retry_on = [RuntimeError] if constants.HF_HUB_ENABLE_HF_TRANSFER else [ReadTimeout] retry_download_hub_file = retry(on=retry_on, sleeps=HF_HUB_HTTP_ERROR_RETRY_SLEEPS)(hf_hub_download) retry_download_hub_file(repo_type=repo_type, revision=revision, repo_id=repo_id, filename=filename, local_dir=local_dir, local_dir_use_symlinks=False, token=hf_token, force_download=force_download, cache_dir=cache_dir, resume_download=resume_download) # File: dataset-viewer-main/libs/libcommon/src/libcommon/viewer_utils/asset.py from io import BytesIO from tempfile import NamedTemporaryFile from typing import TYPE_CHECKING, Optional, TypedDict from PIL import Image, ImageOps from pydub import AudioSegment if TYPE_CHECKING: from libcommon.storage_client import StorageClient SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE = {'.wav': 'audio/wav', '.mp3': 'audio/mpeg', '.opus': 'audio/ogg'} SUPPORTED_AUDIO_EXTENSIONS = SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE.keys() DATASET_GIT_REVISION_PLACEHOLDER = '{dataset_git_revision}' class ImageSource(TypedDict): src: str height: int width: int class AudioSource(TypedDict): src: str type: str def create_image_file(dataset: str, revision: str, config: str, split: str, row_idx: int, column: str, filename: str, image: Image.Image, format: str, storage_client: 'StorageClient') -> ImageSource: object_key = storage_client.generate_object_key(dataset=dataset, revision=DATASET_GIT_REVISION_PLACEHOLDER, config=config, split=split, row_idx=row_idx, column=column, filename=filename) path = replace_dataset_git_revision_placeholder(object_key, revision=revision) if storage_client.overwrite or not storage_client.exists(path): image = ImageOps.exif_transpose(image) buffer = BytesIO() image.save(fp=buffer, format=format) buffer.seek(0) with 
storage_client._fs.open(storage_client.get_full_path(path), 'wb') as f: f.write(buffer.read()) return ImageSource(src=storage_client.get_url(object_key, revision=revision), height=image.height, width=image.width) def create_audio_file(dataset: str, revision: str, config: str, split: str, row_idx: int, column: str, audio_file_bytes: bytes, audio_file_extension: Optional[str], filename: str, storage_client: 'StorageClient') -> list[AudioSource]: object_key = storage_client.generate_object_key(dataset=dataset, revision=DATASET_GIT_REVISION_PLACEHOLDER, config=config, split=split, row_idx=row_idx, column=column, filename=filename) suffix = f".{filename.split('.')[-1]}" if suffix not in SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE: raise ValueError(f"Audio format {suffix} is not supported. Supported formats are {','.join(SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE)}.") media_type = SUPPORTED_AUDIO_EXTENSION_TO_MEDIA_TYPE[suffix] path = replace_dataset_git_revision_placeholder(object_key, revision=revision) if storage_client.overwrite or not storage_client.exists(path): audio_path = storage_client.get_full_path(path) if audio_file_extension == suffix: with storage_client._fs.open(audio_path, 'wb') as f: f.write(audio_file_bytes) else: with NamedTemporaryFile('wb', suffix=audio_file_extension) as tmpfile: tmpfile.write(audio_file_bytes) segment: AudioSegment = AudioSegment.from_file(tmpfile.name) buffer = BytesIO() segment.export(buffer, format=suffix[1:]) buffer.seek(0) with storage_client._fs.open(audio_path, 'wb') as f: f.write(buffer.read()) return [AudioSource(src=storage_client.get_url(object_key, revision=revision), type=media_type)] def replace_dataset_git_revision_placeholder(url_or_object_key: str, revision: str) -> str: return url_or_object_key.replace(DATASET_GIT_REVISION_PLACEHOLDER, revision) # File: dataset-viewer-main/libs/libcommon/src/libcommon/viewer_utils/features.py import json import os from io import BytesIO from typing import Any, Optional, Union from zlib import adler32 import numpy as np import soundfile from datasets import Array2D, Array3D, Array4D, Array5D, Audio, ClassLabel, Features, Image, Sequence, Translation, TranslationVariableLanguages, Value from datasets.features.features import FeatureType, _visit from PIL import Image as PILImage from libcommon.dtos import FeatureItem from libcommon.storage_client import StorageClient from libcommon.viewer_utils.asset import SUPPORTED_AUDIO_EXTENSIONS, create_audio_file, create_image_file UNSUPPORTED_FEATURES = [Value('binary')] AUDIO_FILE_MAGIC_NUMBERS: dict[str, Any] = {'.wav': [(b'RIFF', 0), (b'WAVE', 8)], '.mp3': (b'\xff\xfb', b'\xff\xf3', b'\xff\xf2', b'ID3')} def append_hash_suffix(string: str, json_path: Optional[list[Union[str, int]]]=None) -> str: return f'{string}-{hex(adler32(json.dumps(json_path).encode()))[2:]}' if json_path else string def image(dataset: str, revision: str, config: str, split: str, row_idx: int, value: Any, featureName: str, storage_client: StorageClient, json_path: Optional[list[Union[str, int]]]=None) -> Any: if value is None: return None if isinstance(value, dict) and value.get('bytes'): value = PILImage.open(BytesIO(value['bytes'])) elif isinstance(value, bytes): value = PILImage.open(BytesIO(value)) elif isinstance(value, dict) and 'path' in value and isinstance(value['path'], str) and os.path.exists(value['path']): value = PILImage.open(value['path']) if not isinstance(value, PILImage.Image): raise TypeError(f"Image cell must be a PIL image or an encoded dict of an image, but got 
{str(value)[:300]}{('...' if len(str(value)) > 300 else '')}") for (ext, format) in [('.jpg', 'JPEG'), ('.png', 'PNG')]: try: return create_image_file(dataset=dataset, revision=revision, config=config, split=split, row_idx=row_idx, column=featureName, filename=f"{append_hash_suffix('image', json_path)}{ext}", image=value, format=format, storage_client=storage_client) except OSError: continue raise ValueError('Image cannot be written as JPEG or PNG') def audio(dataset: str, revision: str, config: str, split: str, row_idx: int, value: Any, featureName: str, storage_client: StorageClient, json_path: Optional[list[Union[str, int]]]=None) -> Any: if value is None: return None if not isinstance(value, dict): raise TypeError(f"Audio cell must be an encoded dict of an audio sample, but got {str(value)[:300]}{('...' if len(str(value)) > 300 else '')}") audio_file_extension = get_audio_file_extension(value) audio_file_bytes = get_audio_file_bytes(value) if not audio_file_extension: audio_file_extension = infer_audio_file_extension(audio_file_bytes) target_audio_file_extension = audio_file_extension if audio_file_extension in SUPPORTED_AUDIO_EXTENSIONS else '.wav' return create_audio_file(dataset=dataset, revision=revision, config=config, split=split, row_idx=row_idx, column=featureName, audio_file_bytes=audio_file_bytes, audio_file_extension=audio_file_extension, storage_client=storage_client, filename=f"{append_hash_suffix('audio', json_path)}{target_audio_file_extension}") def get_audio_file_bytes(value: Any) -> bytes: if 'bytes' in value and isinstance(value['bytes'], bytes): audio_file_bytes = value['bytes'] elif 'path' in value and isinstance(value['path'], str) and os.path.exists(value['path']): with open(value['path'], 'rb') as f: audio_file_bytes = f.read() elif 'array' in value and isinstance(value['array'], np.ndarray) and ('sampling_rate' in value) and isinstance(value['sampling_rate'], int): buffer = BytesIO() soundfile.write(buffer, value['array'], value['sampling_rate'], format='wav') audio_file_bytes = buffer.getvalue() else: raise ValueError(f"An audio sample should have 'path' and 'bytes' (or 'array' and 'sampling_rate') but got {', '.join(value)}.") return audio_file_bytes def get_audio_file_extension(value: Any) -> Optional[str]: if 'path' in value and isinstance(value['path'], str): audio_file_extension = os.path.splitext(value['path'].split('::')[0])[1] or None elif 'path' in value and value['path'] is None or 'array' in value: audio_file_extension = '.wav' else: raise ValueError(f"An audio sample should have 'path' and 'bytes' (or 'array' and 'sampling_rate') but got {', '.join(value)}.") return audio_file_extension def infer_audio_file_extension(audio_file_bytes: bytes) -> Optional[str]: for (audio_file_extension, magic_numbers) in AUDIO_FILE_MAGIC_NUMBERS.items(): if isinstance(magic_numbers, list): if all((audio_file_bytes.startswith(magic_number, start) for (magic_number, start) in magic_numbers)): return audio_file_extension elif audio_file_bytes.startswith(magic_numbers): return audio_file_extension return None def get_cell_value(dataset: str, revision: str, config: str, split: str, row_idx: int, cell: Any, featureName: str, fieldType: Any, storage_client: StorageClient, json_path: Optional[list[Union[str, int]]]=None) -> Any: if cell is None: return cell if isinstance(fieldType, Image): return image(dataset=dataset, revision=revision, config=config, split=split, row_idx=row_idx, value=cell, featureName=featureName, storage_client=storage_client, json_path=json_path) elif 
isinstance(fieldType, Audio): return audio(dataset=dataset, revision=revision, config=config, split=split, row_idx=row_idx, value=cell, featureName=featureName, storage_client=storage_client, json_path=json_path) elif isinstance(fieldType, list): if not isinstance(cell, list): raise TypeError('list cell must be a list.') if len(fieldType) != 1: raise TypeError('the feature type should be a 1-element list.') subFieldType = fieldType[0] return [get_cell_value(dataset=dataset, revision=revision, config=config, split=split, row_idx=row_idx, cell=subCell, featureName=featureName, fieldType=subFieldType, storage_client=storage_client, json_path=json_path + [idx] if json_path else [idx]) for (idx, subCell) in enumerate(cell)] elif isinstance(fieldType, Sequence): if isinstance(cell, list): if fieldType.length >= 0 and len(cell) != fieldType.length: raise TypeError('the cell length should be the same as the Sequence length.') return [get_cell_value(dataset=dataset, revision=revision, config=config, split=split, row_idx=row_idx, cell=subCell, featureName=featureName, fieldType=fieldType.feature, storage_client=storage_client, json_path=json_path + [idx] if json_path else [idx]) for (idx, subCell) in enumerate(cell)] if isinstance(cell, dict): if any((not isinstance(v, list) or k not in fieldType.feature for (k, v) in cell.items())): raise TypeError('The value of a Sequence of dicts should be a dictionary of lists.') return {key: [get_cell_value(dataset=dataset, revision=revision, config=config, split=split, row_idx=row_idx, cell=subCellItem, featureName=featureName, fieldType=fieldType.feature[key], storage_client=storage_client, json_path=json_path + [key, idx] if json_path else [key, idx]) for (idx, subCellItem) in enumerate(subCell)] for (key, subCell) in cell.items()} raise TypeError('Sequence cell must be a list or a dict.') elif isinstance(fieldType, dict): if not isinstance(cell, dict): raise TypeError('dict cell must be a dict.') return {key: get_cell_value(dataset=dataset, revision=revision, config=config, split=split, row_idx=row_idx, cell=subCell, featureName=featureName, fieldType=fieldType[key], storage_client=storage_client, json_path=json_path + [key] if json_path else [key]) for (key, subCell) in cell.items()} elif isinstance(fieldType, (Value, ClassLabel, Array2D, Array3D, Array4D, Array5D, Translation, TranslationVariableLanguages)): return cell else: raise TypeError('could not determine the type of the data cell.') def to_features_list(features: Features) -> list[FeatureItem]: features_dict = features.to_dict() return [{'feature_idx': idx, 'name': name, 'type': features_dict[name]} for (idx, name) in enumerate(features)] def get_supported_unsupported_columns(features: Features, unsupported_features: list[FeatureType]=UNSUPPORTED_FEATURES) -> tuple[list[str], list[str]]: (supported_columns, unsupported_columns) = ([], []) for (column, feature) in features.items(): str_column = str(column) supported = True def classify(feature: FeatureType) -> None: nonlocal supported for unsupported_feature in unsupported_features: if type(unsupported_feature) == type(feature) == Value: if unsupported_feature.dtype == feature.dtype: supported = False elif type(unsupported_feature) == type(feature): supported = False _visit(feature, classify) if supported: supported_columns.append(str_column) else: unsupported_columns.append(str_column) return (supported_columns, unsupported_columns) # File: dataset-viewer-main/libs/libcommon/src/libcommon/viewer_utils/parquet_metadata.py from os import makedirs 
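# --- Hypothetical usage sketch (not part of the repository) ---------------
# A small example of the column filtering implemented above in
# libcommon/viewer_utils/features.py: get_supported_unsupported_columns splits
# a datasets.Features object into columns the viewer can render and columns it
# cannot (by default, Value('binary') features). The feature names are invented
# for the example; it assumes libcommon and datasets are importable.
from datasets import Features, Image, Value

from libcommon.viewer_utils.features import get_supported_unsupported_columns

features = Features({
    'text': Value('string'),       # supported
    'picture': Image(),            # supported
    'raw_bytes': Value('binary'),  # matches UNSUPPORTED_FEATURES, so it is filtered out
})
supported_columns, unsupported_columns = get_supported_unsupported_columns(features)
print(supported_columns)    # ['text', 'picture']
print(unsupported_columns)  # ['raw_bytes']
# --------------------------------------------------------------------------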
from pathlib import Path import pyarrow.parquet as pq from libcommon.constants import DATASET_SEPARATOR from libcommon.storage import StrPath PARQUET_METADATA_DIR_MODE = 493 def create_parquet_metadata_dir(dataset: str, config: str, split: str, parquet_metadata_directory: StrPath) -> tuple[Path, str]: dir_path = Path(parquet_metadata_directory).resolve() / dataset / DATASET_SEPARATOR / config / split parquet_metadata_dir_subpath = f'{dataset}/{DATASET_SEPARATOR}/{config}/{split}' makedirs(dir_path, PARQUET_METADATA_DIR_MODE, exist_ok=True) return (dir_path, parquet_metadata_dir_subpath) def create_parquet_metadata_file(dataset: str, config: str, split: str, parquet_file_metadata: pq.FileMetaData, filename: str, parquet_metadata_directory: StrPath, overwrite: bool=True) -> str: (dir_path, parquet_metadata_dir_subpath) = create_parquet_metadata_dir(dataset=dataset, config=config, split=split, parquet_metadata_directory=parquet_metadata_directory) parquet_metadata_file_path = dir_path / filename if overwrite or not parquet_metadata_file_path.exists(): parquet_file_metadata.write_metadata_file(parquet_metadata_file_path) parquet_metadata_subpath = f'{parquet_metadata_dir_subpath}/{filename}' return parquet_metadata_subpath # File: dataset-viewer-main/libs/libcommon/src/libcommon/viewer_utils/rows.py from typing import Protocol from datasets import Audio, Features, Image, Value from libcommon.dtos import Row, RowsContent, SplitFirstRowsResponse from libcommon.exceptions import RowsPostProcessingError, TooBigContentError, TooManyColumnsError from libcommon.storage_client import StorageClient from libcommon.utils import get_json_size from libcommon.viewer_utils.features import get_cell_value, to_features_list from libcommon.viewer_utils.truncate_rows import create_truncated_row_items URL_COLUMN_RATIO = 0.3 def transform_rows(dataset: str, revision: str, config: str, split: str, rows: list[Row], features: Features, storage_client: StorageClient) -> list[Row]: return [{featureName: get_cell_value(dataset=dataset, revision=revision, config=config, split=split, row_idx=row_idx, cell=row[featureName] if featureName in row else None, featureName=featureName, fieldType=fieldType, storage_client=storage_client) for (featureName, fieldType) in features.items()} for (row_idx, row) in enumerate(rows)] class GetRowsContent(Protocol): def __call__(self, rows_max_number: int) -> RowsContent: ... def create_first_rows_response(dataset: str, revision: str, config: str, split: str, storage_client: StorageClient, features: Features, get_rows_content: GetRowsContent, min_cell_bytes: int, rows_max_bytes: int, rows_max_number: int, rows_min_number: int, columns_max_number: int) -> SplitFirstRowsResponse: if features and len(features) > columns_max_number: raise TooManyColumnsError(f'The number of columns ({len(features)}) exceeds the maximum supported number of columns ({columns_max_number}). This is a current limitation of the datasets viewer. You can reduce the number of columns if you want the viewer to work.') features_list = to_features_list(features=features) response_features_only: SplitFirstRowsResponse = {'dataset': dataset, 'config': config, 'split': split, 'features': features_list, 'rows': [], 'truncated': False} surrounding_json_size = get_json_size(response_features_only) if surrounding_json_size > rows_max_bytes: raise TooBigContentError(f'The size of the content of the first rows ({surrounding_json_size} B) exceeds the maximum supported size ({rows_max_bytes} B) even after truncation. 
Please report the issue.') rows_content = get_rows_content(rows_max_number) if len(rows_content.rows) > rows_max_number: raise ValueError(f'The number of rows ({len(rows_content.rows)}) exceeds the maximum supported number of rows ({rows_max_number}).') try: transformed_rows = transform_rows(dataset=dataset, revision=revision, config=config, split=split, rows=rows_content.rows, features=features, storage_client=storage_client) except Exception as err: raise RowsPostProcessingError('Server error while post-processing the split rows. Please report the issue.', cause=err) from err columns_to_keep_untruncated = [col for (col, feature) in features.items() if isinstance(feature, (Image, Audio)) or (isinstance(feature, Value) and feature.dtype == 'string' and (len(transformed_rows) > 0) and (sum((col in row and isinstance(row[col], str) and (row[col].startswith('http://') or row[col].startswith('https://')) for row in transformed_rows)) / len(transformed_rows) > URL_COLUMN_RATIO))] (row_items, truncated) = create_truncated_row_items(rows=transformed_rows, min_cell_bytes=min_cell_bytes, rows_max_bytes=rows_max_bytes - surrounding_json_size, rows_min_number=rows_min_number, columns_to_keep_untruncated=columns_to_keep_untruncated, truncated_columns=rows_content.truncated_columns) response = response_features_only response['rows'] = row_items response['truncated'] = not rows_content.all_fetched or truncated return response # File: dataset-viewer-main/libs/libcommon/src/libcommon/viewer_utils/truncate_rows.py from libcommon.dtos import Row, RowItem from libcommon.utils import SmallerThanMaxBytesError, get_json_size, serialize_and_truncate def to_row_item(row_idx: int, row: Row) -> RowItem: return {'row_idx': row_idx, 'row': row, 'truncated_cells': []} def truncate_row_item(row_item: RowItem, min_cell_bytes: int, columns_to_keep_untruncated: list[str]) -> RowItem: for (column_name, cell) in row_item['row'].items(): if column_name in columns_to_keep_untruncated: continue try: truncated_serialized_cell = serialize_and_truncate(obj=cell, max_bytes=min_cell_bytes) row_item['row'][column_name] = truncated_serialized_cell if column_name not in row_item['truncated_cells']: row_item['truncated_cells'].append(column_name) except SmallerThanMaxBytesError: continue return row_item def truncate_row_items_cells(row_items: list[RowItem], min_cell_bytes: int, rows_max_bytes: int, columns_to_keep_untruncated: list[str]) -> list[RowItem]: rows_bytes = get_json_size(row_items) for row_item in reversed(row_items): if rows_bytes < rows_max_bytes: break previous_size = get_json_size(row_item) row_item = truncate_row_item(row_item=row_item, min_cell_bytes=min_cell_bytes, columns_to_keep_untruncated=columns_to_keep_untruncated) new_size = get_json_size(row_item) rows_bytes += new_size - previous_size return row_items COMMA_SIZE = 1 BRACKET_SIZE = 1 def create_truncated_row_items(rows: list[Row], min_cell_bytes: int, rows_max_bytes: int, rows_min_number: int, columns_to_keep_untruncated: list[str], truncated_columns: list[str]) -> tuple[list[RowItem], bool]: row_items = [] rows_bytes = 2 * BRACKET_SIZE for (row_idx, row) in enumerate(rows[:rows_min_number]): row_item = to_row_item(row_idx=row_idx, row=row) row_item['truncated_cells'] = list(truncated_columns) rows_bytes += get_json_size(row_item) + COMMA_SIZE row_items.append(row_item) if rows_bytes >= rows_max_bytes: truncated_row_items = truncate_row_items_cells(row_items=row_items, min_cell_bytes=min_cell_bytes, rows_max_bytes=rows_max_bytes, 
columns_to_keep_untruncated=columns_to_keep_untruncated) return (truncated_row_items, len(truncated_row_items) < len(rows)) for (idx, row) in enumerate(rows[rows_min_number:]): row_idx = rows_min_number + idx row_item = to_row_item(row_idx=row_idx, row=row) rows_bytes += get_json_size(row_item) + COMMA_SIZE if rows_bytes >= rows_max_bytes: break row_items.append(row_item) return (row_items, len(row_items) < len(rows)) # File: dataset-viewer-main/services/admin/src/admin/app.py import uvicorn from libapi.utils import EXPOSED_HEADERS from libcommon.log import init_logging from libcommon.processing_graph import processing_graph from libcommon.resources import CacheMongoResource, QueueMongoResource, Resource from libcommon.storage import init_parquet_metadata_dir from libcommon.storage_client import StorageClient from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.cors import CORSMiddleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route from starlette_prometheus import PrometheusMiddleware from admin.config import AppConfig, UvicornConfig from admin.routes.blocked_datasets import create_blocked_datasets_endpoint from admin.routes.cache_reports import create_cache_reports_endpoint from admin.routes.cache_reports_with_content import create_cache_reports_with_content_endpoint from admin.routes.dataset_status import create_dataset_status_endpoint from admin.routes.force_refresh import create_force_refresh_endpoint from admin.routes.healthcheck import healthcheck_endpoint from admin.routes.metrics import create_metrics_endpoint from admin.routes.num_dataset_infos_by_builder_name import create_num_dataset_infos_by_builder_name_endpoint from admin.routes.pending_jobs import create_pending_jobs_endpoint from admin.routes.recreate_dataset import create_recreate_dataset_endpoint def create_app() -> Starlette: app_config = AppConfig.from_env() init_logging(level=app_config.log.level) parquet_metadata_directory = init_parquet_metadata_dir(directory=app_config.parquet_metadata.storage_directory) cached_assets_storage_client = StorageClient(protocol=app_config.cached_assets.storage_protocol, storage_root=app_config.cached_assets.storage_root, base_url=app_config.cached_assets.base_url, s3_config=app_config.s3) assets_storage_client = StorageClient(protocol=app_config.assets.storage_protocol, storage_root=app_config.assets.storage_root, base_url=app_config.assets.base_url, s3_config=app_config.s3) storage_clients = [cached_assets_storage_client, assets_storage_client] cache_resource = CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) queue_resource = QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) resources: list[Resource] = [cache_resource, queue_resource] if not cache_resource.is_available(): raise RuntimeError('The connection to the cache database could not be established. Exiting.') if not queue_resource.is_available(): raise RuntimeError('The connection to the queue database could not be established. 
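The helpers in truncate_rows.py above enforce a response byte budget: a minimum number of rows is always kept, additional rows are appended until `rows_max_bytes` is reached, and oversized cells of the kept rows are shrunk toward `min_cell_bytes`. The sketch below re-implements that budget idea in a simplified, self-contained form; the function names, defaults, and the character-based truncation are assumptions for illustration, not the library's behavior.

```python
# Simplified sketch (assumed names, not the library API) of the byte-budget logic
# behind create_truncated_row_items / truncate_row_items_cells above.
import json

def json_size(obj) -> int:
    return len(json.dumps(obj).encode("utf-8"))

def truncate_cell(value, max_bytes: int) -> str:
    # characters are used as a proxy for bytes in this sketch
    serialized = json.dumps(value)
    return serialized if len(serialized.encode("utf-8")) <= max_bytes else serialized[:max_bytes]

def build_row_items(rows, rows_min_number=2, rows_max_bytes=200, min_cell_bytes=20):
    row_items, total = [], 2  # 2 bytes for the surrounding JSON brackets
    for row_idx, row in enumerate(rows):
        item = {"row_idx": row_idx, "row": dict(row), "truncated_cells": []}
        size = json_size(item) + 1  # +1 for the separating comma
        if row_idx >= rows_min_number and total + size >= rows_max_bytes:
            break  # stop adding optional rows once the budget would be exceeded
        row_items.append(item)
        total += size
    # shrink cells of the last rows if the mandatory rows already blew the budget
    for item in reversed(row_items):
        if total < rows_max_bytes:
            break
        before = json_size(item)
        for column, cell in item["row"].items():
            item["row"][column] = truncate_cell(cell, min_cell_bytes)
            item["truncated_cells"].append(column)
        total += json_size(item) - before
    truncated = len(row_items) < len(rows)
    return row_items, truncated

rows = [{"text": "x" * 100}, {"text": "y" * 100}, {"text": "z" * 100}]
items, truncated = build_row_items(rows)
print(len(items), truncated, items[0]["truncated_cells"])
```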
Exiting.') middleware = [Middleware(CORSMiddleware, allow_origins=['*'], allow_methods=['*'], allow_headers=['*'], allow_credentials=True, expose_headers=EXPOSED_HEADERS), Middleware(GZipMiddleware), Middleware(PrometheusMiddleware, filter_unhandled_paths=True)] routes = [Route('/healthcheck', endpoint=healthcheck_endpoint), Route('/admin/healthcheck', endpoint=healthcheck_endpoint), Route('/admin/metrics', endpoint=create_metrics_endpoint(parquet_metadata_directory=parquet_metadata_directory)), Route('/admin/pending-jobs', endpoint=create_pending_jobs_endpoint(max_age=app_config.admin.max_age, external_auth_url=app_config.admin.external_auth_url, organization=app_config.admin.hf_organization, hf_timeout_seconds=app_config.admin.hf_timeout_seconds)), Route('/admin/blocked-datasets', endpoint=create_blocked_datasets_endpoint(max_age=app_config.admin.max_age, external_auth_url=app_config.admin.external_auth_url, organization=app_config.admin.hf_organization, hf_timeout_seconds=app_config.admin.hf_timeout_seconds)), Route('/admin/dataset-status', endpoint=create_dataset_status_endpoint(max_age=app_config.admin.max_age, external_auth_url=app_config.admin.external_auth_url, organization=app_config.admin.hf_organization, hf_timeout_seconds=app_config.admin.hf_timeout_seconds)), Route('/admin/num-dataset-infos-by-builder-name', endpoint=create_num_dataset_infos_by_builder_name_endpoint(max_age=app_config.admin.max_age, external_auth_url=app_config.admin.external_auth_url, organization=app_config.admin.hf_organization, hf_timeout_seconds=app_config.admin.hf_timeout_seconds)), Route('/admin/recreate-dataset', endpoint=create_recreate_dataset_endpoint(hf_endpoint=app_config.common.hf_endpoint, hf_token=app_config.common.hf_token, external_auth_url=app_config.admin.external_auth_url, organization=app_config.admin.hf_organization, hf_timeout_seconds=app_config.admin.hf_timeout_seconds, blocked_datasets=app_config.common.blocked_datasets, storage_clients=storage_clients), methods=['POST'])] for processing_step in processing_graph.get_processing_steps(): cache_kind = processing_step.cache_kind job_type = processing_step.job_type input_type = processing_step.input_type routes.extend([Route(f'/admin/force-refresh/{job_type}', endpoint=create_force_refresh_endpoint(input_type=input_type, job_type=job_type, bonus_difficulty_if_dataset_is_big=processing_step.bonus_difficulty_if_dataset_is_big, hf_endpoint=app_config.common.hf_endpoint, hf_token=app_config.common.hf_token, external_auth_url=app_config.admin.external_auth_url, organization=app_config.admin.hf_organization, hf_timeout_seconds=app_config.admin.hf_timeout_seconds, blocked_datasets=app_config.common.blocked_datasets), methods=['POST']), Route(f'/admin/cache-reports/{cache_kind}', endpoint=create_cache_reports_endpoint(cache_kind=cache_kind, cache_reports_num_results=app_config.admin.cache_reports_num_results, max_age=app_config.admin.max_age, external_auth_url=app_config.admin.external_auth_url, organization=app_config.admin.hf_organization, hf_timeout_seconds=app_config.admin.hf_timeout_seconds)), Route(f'/admin/cache-reports-with-content/{cache_kind}', endpoint=create_cache_reports_with_content_endpoint(cache_kind=cache_kind, cache_reports_with_content_num_results=app_config.admin.cache_reports_with_content_num_results, max_age=app_config.admin.max_age, external_auth_url=app_config.admin.external_auth_url, organization=app_config.admin.hf_organization, hf_timeout_seconds=app_config.admin.hf_timeout_seconds))]) return Starlette(routes=routes, 
middleware=middleware, on_shutdown=[resource.release for resource in resources]) def start() -> None: uvicorn_config = UvicornConfig.from_env() uvicorn.run('app:create_app', host=uvicorn_config.hostname, port=uvicorn_config.port, factory=True, workers=uvicorn_config.num_workers) # File: dataset-viewer-main/services/admin/src/admin/authentication.py from typing import Literal, Optional import httpx from libapi.authentication import RequestAuth from libapi.exceptions import ExternalAuthenticatedError, ExternalUnauthenticatedError from starlette.requests import Request async def auth_check(external_auth_url: Optional[str]=None, request: Optional[Request]=None, organization: Optional[str]=None, hf_timeout_seconds: Optional[float]=None) -> Literal[True]: if organization is None or external_auth_url is None: return True try: async with httpx.AsyncClient() as client: response = await client.get(external_auth_url, auth=RequestAuth(request), timeout=hf_timeout_seconds) except Exception as err: raise RuntimeError('External authentication check failed', err) from err if response.status_code == 200: try: json = response.json() if organization is None or organization in {org['name'] for org in json['orgs']}: return True else: raise ExternalAuthenticatedError('You are not member of the organization') except Exception as err: raise ExternalAuthenticatedError('Cannot access the route with the current credentials. Please retry with other authentication credentials.') from err elif response.status_code == 401: raise ExternalUnauthenticatedError('Cannot access the route. Please retry with authentication.') elif response.status_code in {403, 404}: raise ExternalAuthenticatedError('Cannot access the route with the current credentials. Please retry with other authentication credentials.') else: raise ValueError(f'Unexpected status code {response.status_code}') # File: dataset-viewer-main/services/admin/src/admin/config.py from dataclasses import dataclass, field from typing import Optional from environs import Env from libcommon.config import AssetsConfig, CacheConfig, CachedAssetsConfig, CommonConfig, LogConfig, ParquetMetadataConfig, QueueConfig, S3Config ADMIN_UVICORN_HOSTNAME = 'localhost' ADMIN_UVICORN_NUM_WORKERS = 2 ADMIN_UVICORN_PORT = 8000 @dataclass(frozen=True) class UvicornConfig: hostname: str = ADMIN_UVICORN_HOSTNAME num_workers: int = ADMIN_UVICORN_NUM_WORKERS port: int = ADMIN_UVICORN_PORT @classmethod def from_env(cls) -> 'UvicornConfig': env = Env(expand_vars=True) with env.prefixed('ADMIN_UVICORN_'): return cls(hostname=env.str(name='HOSTNAME', default=ADMIN_UVICORN_HOSTNAME), num_workers=env.int(name='NUM_WORKERS', default=ADMIN_UVICORN_NUM_WORKERS), port=env.int(name='PORT', default=ADMIN_UVICORN_PORT)) ADMIN_CACHE_REPORTS_NUM_RESULTS = 100 ADMIN_CACHE_REPORTS_WITH_CONTENT_NUM_RESULTS = 100 ADMIN_EXTERNAL_AUTH_URL = None ADMIN_HF_ORGANIZATION = None ADMIN_HF_TIMEOUT_SECONDS = 0.2 ADMIN_HF_WHOAMI_PATH = '/api/whoami-v2' ADMIN_MAX_AGE = 10 @dataclass(frozen=True) class AdminConfig: cache_reports_num_results: int = ADMIN_CACHE_REPORTS_NUM_RESULTS cache_reports_with_content_num_results: int = ADMIN_CACHE_REPORTS_WITH_CONTENT_NUM_RESULTS external_auth_url: Optional[str] = ADMIN_EXTERNAL_AUTH_URL hf_organization: Optional[str] = ADMIN_HF_ORGANIZATION hf_timeout_seconds: Optional[float] = ADMIN_HF_TIMEOUT_SECONDS hf_whoami_path: str = ADMIN_HF_WHOAMI_PATH max_age: int = ADMIN_MAX_AGE @classmethod def from_env(cls, common_config: CommonConfig) -> 'AdminConfig': env = Env(expand_vars=True) with 
env.prefixed('ADMIN_'): hf_whoami_path = env.str(name='HF_WHOAMI_PATH', default=ADMIN_HF_WHOAMI_PATH) external_auth_url = None if hf_whoami_path is None else f'{common_config.hf_endpoint}{hf_whoami_path}' return cls(cache_reports_num_results=env.int(name='CACHE_REPORTS_NUM_RESULTS', default=ADMIN_CACHE_REPORTS_NUM_RESULTS), cache_reports_with_content_num_results=env.int(name='CACHE_REPORTS_WITH_CONTENT_NUM_RESULTS', default=ADMIN_CACHE_REPORTS_WITH_CONTENT_NUM_RESULTS), external_auth_url=external_auth_url, hf_organization=env.str(name='HF_ORGANIZATION', default=ADMIN_HF_ORGANIZATION), hf_timeout_seconds=env.float(name='HF_TIMEOUT_SECONDS', default=ADMIN_HF_TIMEOUT_SECONDS), hf_whoami_path=hf_whoami_path, max_age=env.int(name='MAX_AGE', default=ADMIN_MAX_AGE)) @dataclass(frozen=True) class AppConfig: admin: AdminConfig = field(default_factory=AdminConfig) assets: AssetsConfig = field(default_factory=AssetsConfig) cache: CacheConfig = field(default_factory=CacheConfig) cached_assets: CachedAssetsConfig = field(default_factory=CachedAssetsConfig) common: CommonConfig = field(default_factory=CommonConfig) log: LogConfig = field(default_factory=LogConfig) parquet_metadata: ParquetMetadataConfig = field(default_factory=ParquetMetadataConfig) queue: QueueConfig = field(default_factory=QueueConfig) s3: S3Config = field(default_factory=S3Config) @classmethod def from_env(cls) -> 'AppConfig': common_config = CommonConfig.from_env() return cls(common=common_config, assets=AssetsConfig.from_env(), cache=CacheConfig.from_env(), cached_assets=CachedAssetsConfig.from_env(), log=LogConfig.from_env(), parquet_metadata=ParquetMetadataConfig.from_env(), queue=QueueConfig.from_env(), admin=AdminConfig.from_env(common_config), s3=S3Config.from_env()) # File: dataset-viewer-main/services/admin/src/admin/routes/blocked_datasets.py import logging from typing import Optional from libapi.exceptions import ApiError, UnexpectedApiError from libapi.utils import Endpoint, get_json_api_error_response, get_json_ok_response from libcommon.queue.dataset_blockages import get_blocked_datasets from starlette.requests import Request from starlette.responses import Response from admin.authentication import auth_check def create_blocked_datasets_endpoint(max_age: int, external_auth_url: Optional[str]=None, organization: Optional[str]=None, hf_timeout_seconds: Optional[float]=None) -> Endpoint: async def blocked_datasets_endpoint(request: Request) -> Response: logging.info('/blocked-datasets') try: await auth_check(external_auth_url=external_auth_url, request=request, organization=organization, hf_timeout_seconds=hf_timeout_seconds) return get_json_ok_response({'blocked_datasets': get_blocked_datasets()}, max_age=max_age) except ApiError as e: return get_json_api_error_response(e, max_age=max_age) except Exception as e: return get_json_api_error_response(UnexpectedApiError('Unexpected error.', e), max_age=max_age) return blocked_datasets_endpoint # File: dataset-viewer-main/services/admin/src/admin/routes/cache_reports.py import logging from typing import Optional from libapi.exceptions import ApiError, InvalidParameterError, UnexpectedApiError from libapi.request import get_request_parameter from libapi.utils import Endpoint, get_json_api_error_response, get_json_ok_response from libcommon.simple_cache import InvalidCursor, InvalidLimit, get_cache_reports from starlette.requests import Request from starlette.responses import Response from admin.authentication import auth_check def create_cache_reports_endpoint(cache_kind: str, 
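The config dataclasses above all follow the same environs pattern: typed accessors with defaults, looked up under a shared prefix. A short runnable sketch of that pattern; the class name and the environment variable set at the top are illustrative.

```python
# Sketch of the environs pattern used by UvicornConfig / AdminConfig above:
# read prefixed environment variables with typed accessors and defaults.
import os
from dataclasses import dataclass

from environs import Env

os.environ["ADMIN_UVICORN_PORT"] = "9000"  # simulate deployment configuration

@dataclass(frozen=True)
class ExampleUvicornConfig:
    hostname: str = "localhost"
    num_workers: int = 2
    port: int = 8000

    @classmethod
    def from_env(cls) -> "ExampleUvicornConfig":
        env = Env(expand_vars=True)
        with env.prefixed("ADMIN_UVICORN_"):  # every lookup below uses this prefix
            return cls(
                hostname=env.str("HOSTNAME", default="localhost"),
                num_workers=env.int("NUM_WORKERS", default=2),
                port=env.int("PORT", default=8000),
            )

print(ExampleUvicornConfig.from_env())  # port comes from ADMIN_UVICORN_PORT
```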
cache_reports_num_results: int, max_age: int, external_auth_url: Optional[str]=None, organization: Optional[str]=None, hf_timeout_seconds: Optional[float]=None) -> Endpoint: async def cache_reports_endpoint(request: Request) -> Response: try: cursor = get_request_parameter(request, 'cursor') logging.info(f'Cache reports for {cache_kind}, cursor={cursor}') await auth_check(external_auth_url=external_auth_url, request=request, organization=organization, hf_timeout_seconds=hf_timeout_seconds) try: return get_json_ok_response(get_cache_reports(kind=cache_kind, cursor=cursor, limit=cache_reports_num_results), max_age=max_age) except InvalidCursor as e: raise InvalidParameterError('Invalid cursor.') from e except InvalidLimit as e: raise UnexpectedApiError('Invalid limit. CACHE_REPORTS_NUM_RESULTS must be a strictly positive integer.') from e except ApiError as e: return get_json_api_error_response(e, max_age=max_age) except Exception as e: return get_json_api_error_response(UnexpectedApiError('Unexpected error.', e), max_age=max_age) return cache_reports_endpoint # File: dataset-viewer-main/services/admin/src/admin/routes/cache_reports_with_content.py import logging from typing import Optional from libapi.exceptions import ApiError, InvalidParameterError, UnexpectedApiError from libapi.request import get_request_parameter from libapi.utils import Endpoint, get_json_api_error_response, get_json_ok_response from libcommon.simple_cache import InvalidCursor, InvalidLimit, get_cache_reports_with_content from starlette.requests import Request from starlette.responses import Response from admin.authentication import auth_check def create_cache_reports_with_content_endpoint(cache_kind: str, cache_reports_with_content_num_results: int, max_age: int, external_auth_url: Optional[str]=None, organization: Optional[str]=None, hf_timeout_seconds: Optional[float]=None) -> Endpoint: async def cache_reports_with_content_endpoint(request: Request) -> Response: try: cursor = get_request_parameter(request, 'cursor') logging.info(f'Cache reports with content for {cache_kind}, cursor={cursor}') await auth_check(external_auth_url=external_auth_url, request=request, organization=organization, hf_timeout_seconds=hf_timeout_seconds) try: return get_json_ok_response(get_cache_reports_with_content(kind=cache_kind, cursor=cursor, limit=cache_reports_with_content_num_results), max_age=max_age) except InvalidCursor as e: raise InvalidParameterError('Invalid cursor.') from e except InvalidLimit as e: raise UnexpectedApiError('Invalid limit. 
CACHE_REPORTS_WITH_CONTENT_NUM_RESULTS must be a strictly positive integer.') from e except ApiError as e: return get_json_api_error_response(e, max_age=max_age) except Exception as e: return get_json_api_error_response(UnexpectedApiError('Unexpected error.', e), max_age=max_age) return cache_reports_with_content_endpoint # File: dataset-viewer-main/services/admin/src/admin/routes/dataset_status.py import logging from typing import Optional from libapi.exceptions import ApiError, UnexpectedApiError from libapi.request import get_request_parameter from libapi.utils import Endpoint, get_json_api_error_response, get_json_ok_response from libcommon.processing_graph import processing_graph from libcommon.queue.jobs import Queue from libcommon.simple_cache import get_dataset_responses_without_content_for_kind from starlette.requests import Request from starlette.responses import Response from admin.authentication import auth_check def create_dataset_status_endpoint(max_age: int, external_auth_url: Optional[str]=None, organization: Optional[str]=None, hf_timeout_seconds: Optional[float]=None) -> Endpoint: async def dataset_status_endpoint(request: Request) -> Response: try: dataset = get_request_parameter(request, 'dataset', required=True) logging.info(f'/dataset-status, dataset={dataset}') await auth_check(external_auth_url=external_auth_url, request=request, organization=organization, hf_timeout_seconds=hf_timeout_seconds) queue = Queue() return get_json_ok_response({processing_step.name: {'cached_responses': get_dataset_responses_without_content_for_kind(kind=processing_step.cache_kind, dataset=dataset), 'jobs': queue.get_dataset_pending_jobs_for_type(dataset=dataset, job_type=processing_step.job_type)} for processing_step in processing_graph.get_alphabetically_ordered_processing_steps()}, max_age=max_age) except ApiError as e: return get_json_api_error_response(e, max_age=max_age) except Exception as e: return get_json_api_error_response(UnexpectedApiError('Unexpected error.', e), max_age=max_age) return dataset_status_endpoint # File: dataset-viewer-main/services/admin/src/admin/routes/force_refresh.py import logging from typing import Optional from libapi.exceptions import InvalidParameterError, MissingRequiredParameterError, UnexpectedApiError from libapi.request import get_request_parameter from libapi.utils import Endpoint, are_valid_parameters, get_json_api_error_response, get_json_ok_response from libcommon.constants import DEFAULT_DIFFICULTY_MAX, DEFAULT_DIFFICULTY_MIN, MIN_BYTES_FOR_BONUS_DIFFICULTY from libcommon.dtos import Priority from libcommon.exceptions import CustomError from libcommon.operations import get_latest_dataset_revision_if_supported_or_raise from libcommon.orchestrator import get_num_bytes_from_config_infos from libcommon.processing_graph import InputType, processing_graph from libcommon.queue.jobs import Queue from starlette.requests import Request from starlette.responses import Response from admin.authentication import auth_check def create_force_refresh_endpoint(input_type: InputType, job_type: str, bonus_difficulty_if_dataset_is_big: int, blocked_datasets: list[str], hf_endpoint: str, hf_token: Optional[str]=None, external_auth_url: Optional[str]=None, organization: Optional[str]=None, hf_timeout_seconds: Optional[float]=None) -> Endpoint: async def force_refresh_endpoint(request: Request) -> Response: try: dataset = get_request_parameter(request, 'dataset', required=True) if input_type == 'dataset': config = None split = None elif input_type == 'config': 
config = get_request_parameter(request, 'config', required=True) split = None else: config = get_request_parameter(request, 'config', required=True) split = get_request_parameter(request, 'split', required=True) if not are_valid_parameters([config, split]): raise MissingRequiredParameterError("Parameters 'config' and 'split' are required") try: priority = Priority(get_request_parameter(request, 'priority', default='low')) except ValueError: raise InvalidParameterError(f"Parameter 'priority' should be one of {', '.join((prio.value for prio in Priority))}.") difficulty_error_message = "Parameter 'difficulty' should be an int value between 0 and 100." try: total_difficulty = int(get_request_parameter(request, 'difficulty', default=str(processing_graph.get_processing_step(job_type).difficulty))) if total_difficulty > DEFAULT_DIFFICULTY_MAX or total_difficulty < DEFAULT_DIFFICULTY_MIN: raise InvalidParameterError(difficulty_error_message) except ValueError: raise InvalidParameterError(difficulty_error_message) if config is not None: num_bytes = get_num_bytes_from_config_infos(dataset=dataset, config=config, split=split) if num_bytes is not None and num_bytes > MIN_BYTES_FOR_BONUS_DIFFICULTY: total_difficulty += bonus_difficulty_if_dataset_is_big logging.info(f'/force-refresh/{job_type}, dataset={dataset!r}, config={config!r}, split={split!r}, priority={priority!r}, total_difficulty={total_difficulty!r}') await auth_check(external_auth_url=external_auth_url, request=request, organization=organization, hf_timeout_seconds=hf_timeout_seconds) revision = get_latest_dataset_revision_if_supported_or_raise(dataset=dataset, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds, blocked_datasets=blocked_datasets) Queue().add_job(job_type=job_type, difficulty=total_difficulty, dataset=dataset, revision=revision, config=config, split=split, priority=priority) return get_json_ok_response({'status': 'ok'}, max_age=0) except CustomError as e: return get_json_api_error_response(e, max_age=0) except Exception as e: return get_json_api_error_response(UnexpectedApiError('Unexpected error.', e), max_age=0) return force_refresh_endpoint # File: dataset-viewer-main/services/admin/src/admin/routes/metrics.py import logging from libapi.utils import Endpoint from libcommon.prometheus import Prometheus, update_parquet_metadata_disk_usage, update_queue_jobs_total, update_responses_in_cache_total, update_worker_size_jobs_count from libcommon.storage import StrPath from prometheus_client import CONTENT_TYPE_LATEST from starlette.requests import Request from starlette.responses import Response def create_metrics_endpoint(parquet_metadata_directory: StrPath) -> Endpoint: prometheus = Prometheus() async def metrics_endpoint(_: Request) -> Response: logging.info('/metrics') update_queue_jobs_total() update_worker_size_jobs_count() update_responses_in_cache_total() update_parquet_metadata_disk_usage(directory=parquet_metadata_directory) return Response(prometheus.getLatestContent(), headers={'Content-Type': CONTENT_TYPE_LATEST}) return metrics_endpoint # File: dataset-viewer-main/services/admin/src/admin/routes/num_dataset_infos_by_builder_name.py import logging from typing import Optional from libapi.exceptions import ApiError, UnexpectedApiError from libapi.utils import Endpoint, get_json_api_error_response, get_json_ok_response from libcommon.simple_cache import CachedResponseDocument from starlette.requests import Request from starlette.responses import Response from admin.authentication import 
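The force-refresh route defined above accepts the dataset (and, depending on the step's input type, config and split) plus optional `priority` and `difficulty` query parameters, and enqueues a job after the auth check. A hedged usage sketch follows; the host, job type, dataset name, and token are placeholders, not values from this repository.

```python
# Hedged usage sketch: triggering a force refresh through the admin route wired above.
# Host, job type, dataset and token are placeholders.
import requests

ADMIN_ENDPOINT = "http://localhost:8100"  # assumed local admin service
JOB_TYPE = "dataset-config-names"          # assumed processing step job type

response = requests.post(
    f"{ADMIN_ENDPOINT}/admin/force-refresh/{JOB_TYPE}",
    params={"dataset": "user/my_dataset", "priority": "low", "difficulty": "50"},
    headers={"Authorization": "Bearer hf_xxx"},  # forwarded to the external auth check
    timeout=10,
)
print(response.status_code, response.json())
```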
auth_check def create_num_dataset_infos_by_builder_name_endpoint(max_age: int, external_auth_url: Optional[str]=None, organization: Optional[str]=None, hf_timeout_seconds: Optional[float]=None) -> Endpoint: async def usage_endpoint(request: Request) -> Response: try: logging.info('/num-dataset-infos-by-builder-name') await auth_check(external_auth_url=external_auth_url, request=request, organization=organization, hf_timeout_seconds=hf_timeout_seconds) num_datasets_infos = CachedResponseDocument.objects(http_status=200, kind='dataset-info').count() num_parquet_datasets_infos = CachedResponseDocument.objects(http_status=200, kind='dataset-info', content__dataset_info__default__builder_name='parquet').count() num_csv_datasets_infos = CachedResponseDocument.objects(http_status=200, kind='dataset-info', content__dataset_info__default__builder_name='csv').count() num_text_datasets_infos = CachedResponseDocument.objects(http_status=200, kind='dataset-info', content__dataset_info__default__builder_name='text').count() num_imagefolder_datasets_infos = CachedResponseDocument.objects(http_status=200, kind='dataset-info', content__dataset_info__default__builder_name='imagefolder').count() num_audiofolder_datasets_infos = CachedResponseDocument.objects(http_status=200, kind='dataset-info', content__dataset_info__default__builder_name='audiofolder').count() num_json_datasets_infos = CachedResponseDocument.objects(http_status=200, kind='dataset-info', content__dataset_info__default__builder_name='json').count() num_arrow_datasets_infos = CachedResponseDocument.objects(http_status=200, kind='dataset-info', content__dataset_info__default__builder_name='arrow').count() num_webdataset_datasets_infos = CachedResponseDocument.objects(http_status=200, kind='dataset-info', content__dataset_info__default__builder_name='webdataset').count() num_other_dataset_infos = num_datasets_infos - (num_parquet_datasets_infos + num_csv_datasets_infos + num_text_datasets_infos + num_imagefolder_datasets_infos + num_audiofolder_datasets_infos + num_json_datasets_infos + num_arrow_datasets_infos + num_webdataset_datasets_infos) return get_json_ok_response({'parquet': num_parquet_datasets_infos, 'csv': num_csv_datasets_infos, 'text': num_text_datasets_infos, 'imagefolder': num_imagefolder_datasets_infos, 'audiofolder': num_audiofolder_datasets_infos, 'json': num_json_datasets_infos, 'arrow': num_arrow_datasets_infos, 'webdataset': num_webdataset_datasets_infos, 'other': num_other_dataset_infos}, max_age=max_age) except ApiError as e: return get_json_api_error_response(e, max_age=max_age) except Exception as e: return get_json_api_error_response(UnexpectedApiError('Unexpected error.', e), max_age=max_age) return usage_endpoint # File: dataset-viewer-main/services/admin/src/admin/routes/pending_jobs.py import logging from typing import Optional from libapi.exceptions import ApiError, UnexpectedApiError from libapi.utils import Endpoint, get_json_api_error_response, get_json_ok_response from libcommon.processing_graph import processing_graph from libcommon.queue.jobs import Queue from starlette.requests import Request from starlette.responses import Response from admin.authentication import auth_check def create_pending_jobs_endpoint(max_age: int, external_auth_url: Optional[str]=None, organization: Optional[str]=None, hf_timeout_seconds: Optional[float]=None) -> Endpoint: async def pending_jobs_endpoint(request: Request) -> Response: logging.info('/pending-jobs') try: await auth_check(external_auth_url=external_auth_url, 
request=request, organization=organization, hf_timeout_seconds=hf_timeout_seconds) queue = Queue() return get_json_ok_response({processing_step.job_type: queue.get_dump_by_pending_status(job_type=processing_step.job_type) for processing_step in processing_graph.get_alphabetically_ordered_processing_steps()}, max_age=max_age) except ApiError as e: return get_json_api_error_response(e, max_age=max_age) except Exception as e: return get_json_api_error_response(UnexpectedApiError('Unexpected error.', e), max_age=max_age) return pending_jobs_endpoint # File: dataset-viewer-main/services/admin/src/admin/routes/recreate_dataset.py import logging from typing import Optional, TypedDict from libapi.exceptions import InvalidParameterError, UnexpectedApiError from libapi.request import get_request_parameter from libapi.utils import Endpoint, get_json_api_error_response, get_json_ok_response from libcommon.dtos import Priority from libcommon.exceptions import CustomError from libcommon.operations import delete_dataset, update_dataset from libcommon.storage_client import StorageClient from starlette.requests import Request from starlette.responses import Response from admin.authentication import auth_check class RecreateDatasetReport(TypedDict): status: str dataset: str def recreate_dataset(dataset: str, priority: Priority, hf_endpoint: str, blocked_datasets: list[str], hf_token: Optional[str]=None, storage_clients: Optional[list[StorageClient]]=None) -> RecreateDatasetReport: delete_dataset(dataset=dataset, storage_clients=storage_clients) update_dataset(dataset=dataset, blocked_datasets=blocked_datasets, hf_endpoint=hf_endpoint, hf_token=hf_token, priority=priority, storage_clients=storage_clients) return RecreateDatasetReport(status='ok', dataset=dataset) def create_recreate_dataset_endpoint(hf_endpoint: str, blocked_datasets: list[str], hf_token: Optional[str]=None, external_auth_url: Optional[str]=None, organization: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, storage_clients: Optional[list[StorageClient]]=None) -> Endpoint: async def recreate_dataset_endpoint(request: Request) -> Response: try: dataset = get_request_parameter(request, 'dataset', required=True) try: priority = Priority(get_request_parameter(request, 'priority', default='low')) except ValueError: raise InvalidParameterError(f"Parameter 'priority' should be one of {', '.join((prio.value for prio in Priority))}.") logging.info(f'/recreate-dataset, dataset={dataset}, priority={priority}') await auth_check(external_auth_url=external_auth_url, request=request, organization=organization, hf_timeout_seconds=hf_timeout_seconds) return get_json_ok_response(recreate_dataset(dataset=dataset, priority=priority, blocked_datasets=blocked_datasets, hf_endpoint=hf_endpoint, hf_token=hf_token, storage_clients=storage_clients), max_age=0) except CustomError as e: return get_json_api_error_response(e, max_age=0) except Exception as e: return get_json_api_error_response(UnexpectedApiError('Unexpected error.', e), max_age=0) return recreate_dataset_endpoint # File: dataset-viewer-main/services/api/src/api/app.py import uvicorn from libapi.config import UvicornConfig from libapi.jwt_token import get_jwt_public_keys from libapi.routes.healthcheck import healthcheck_endpoint from libapi.routes.metrics import create_metrics_endpoint from libapi.utils import EXPOSED_HEADERS from libcommon.cloudfront import get_cloudfront_signer from libcommon.log import init_logging from libcommon.processing_graph import processing_graph from 
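Every admin route above defers to `auth_check`, which forwards the caller's credentials to the Hub whoami route and verifies membership of the configured organization. The sketch below shows that check in isolation with httpx; the URL, token, organization, and timeout are placeholders, and the response shape (an `orgs` list of objects with a `name` field) is inferred from the code above.

```python
# Hedged sketch of the whoami-based organization check performed by auth_check above.
import asyncio

import httpx

async def check_membership(token: str, organization: str,
                           whoami_url: str = "https://huggingface.co/api/whoami-v2") -> bool:
    async with httpx.AsyncClient() as client:
        response = await client.get(
            whoami_url,
            headers={"Authorization": f"Bearer {token}"},
            timeout=10,
        )
    if response.status_code != 200:
        return False  # unauthenticated or invalid credentials
    orgs = {org["name"] for org in response.json().get("orgs", [])}
    return organization in orgs

# with a placeholder token this prints False (the whoami call returns 401)
print(asyncio.run(check_membership("hf_xxx", "huggingface")))
```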
libcommon.resources import CacheMongoResource, QueueMongoResource, Resource from libcommon.storage_client import StorageClient from libcommon.url_preparator import URLPreparator from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.cors import CORSMiddleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route from starlette_prometheus import PrometheusMiddleware from api.config import AppConfig, EndpointConfig from api.routes.endpoint import EndpointsDefinition, create_endpoint def create_app() -> Starlette: app_config = AppConfig.from_env() endpoint_config = EndpointConfig.from_env() return create_app_with_config(app_config=app_config, endpoint_config=endpoint_config) def create_app_with_config(app_config: AppConfig, endpoint_config: EndpointConfig) -> Starlette: init_logging(level=app_config.log.level) endpoints_definition = EndpointsDefinition(processing_graph, endpoint_config) hf_jwt_public_keys = get_jwt_public_keys(algorithm_name=app_config.api.hf_jwt_algorithm, public_key_url=app_config.api.hf_jwt_public_key_url, additional_public_keys=app_config.api.hf_jwt_additional_public_keys, timeout_seconds=app_config.api.hf_timeout_seconds) middleware = [Middleware(CORSMiddleware, allow_origins=['*'], allow_methods=['*'], allow_headers=['*'], allow_credentials=True, expose_headers=EXPOSED_HEADERS), Middleware(GZipMiddleware), Middleware(PrometheusMiddleware, filter_unhandled_paths=True)] cached_assets_storage_client = StorageClient(protocol=app_config.cached_assets.storage_protocol, storage_root=app_config.cached_assets.storage_root, base_url=app_config.cached_assets.base_url, s3_config=app_config.s3) url_signer = get_cloudfront_signer(cloudfront_config=app_config.cloudfront) url_preparator = URLPreparator(url_signer=url_signer) assets_storage_client = StorageClient(protocol=app_config.assets.storage_protocol, storage_root=app_config.assets.storage_root, base_url=app_config.assets.base_url, s3_config=app_config.s3, url_preparator=url_preparator) storage_clients = [cached_assets_storage_client, assets_storage_client] cache_resource = CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) queue_resource = QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) resources: list[Resource] = [cache_resource, queue_resource] if not cache_resource.is_available(): raise RuntimeError('The connection to the cache database could not be established. Exiting.') if not queue_resource.is_available(): raise RuntimeError('The connection to the queue database could not be established. 
Exiting.') routes = [Route(endpoint_name, endpoint=create_endpoint(endpoint_name=endpoint_name, step_by_input_type=step_by_input_type, hf_endpoint=app_config.common.hf_endpoint, hf_token=app_config.common.hf_token, blocked_datasets=app_config.common.blocked_datasets, assets_storage_client=assets_storage_client, hf_jwt_public_keys=hf_jwt_public_keys, hf_jwt_algorithm=app_config.api.hf_jwt_algorithm, external_auth_url=app_config.api.external_auth_url, hf_timeout_seconds=app_config.api.hf_timeout_seconds, max_age_long=app_config.api.max_age_long, max_age_short=app_config.api.max_age_short, storage_clients=storage_clients)) for (endpoint_name, step_by_input_type) in endpoints_definition.step_by_input_type_and_endpoint.items()] + [Route('/healthcheck', endpoint=healthcheck_endpoint), Route('/metrics', endpoint=create_metrics_endpoint())] return Starlette(routes=routes, middleware=middleware, on_shutdown=[resource.release for resource in resources]) def start() -> None: uvicorn_config = UvicornConfig.from_env() uvicorn.run('app:create_app', host=uvicorn_config.hostname, port=uvicorn_config.port, factory=True, workers=uvicorn_config.num_workers) # File: dataset-viewer-main/services/api/src/api/config.py from collections.abc import Mapping from dataclasses import dataclass, field from libapi.config import ApiConfig from libcommon.config import AssetsConfig, CacheConfig, CachedAssetsConfig, CloudFrontConfig, CommonConfig, LogConfig, QueueConfig, S3Config from libcommon.processing_graph import InputType @dataclass(frozen=True) class AppConfig: api: ApiConfig = field(default_factory=ApiConfig) assets: AssetsConfig = field(default_factory=AssetsConfig) cache: CacheConfig = field(default_factory=CacheConfig) cached_assets: CachedAssetsConfig = field(default_factory=CachedAssetsConfig) cloudfront: CloudFrontConfig = field(default_factory=CloudFrontConfig) common: CommonConfig = field(default_factory=CommonConfig) log: LogConfig = field(default_factory=LogConfig) queue: QueueConfig = field(default_factory=QueueConfig) s3: S3Config = field(default_factory=S3Config) @classmethod def from_env(cls) -> 'AppConfig': common_config = CommonConfig.from_env() return cls(common=common_config, assets=AssetsConfig.from_env(), cache=CacheConfig.from_env(), cached_assets=CachedAssetsConfig.from_env(), cloudfront=CloudFrontConfig.from_env(), log=LogConfig.from_env(), queue=QueueConfig.from_env(), api=ApiConfig.from_env(hf_endpoint=common_config.hf_endpoint), s3=S3Config.from_env()) ProcessingStepNameByInputType = Mapping[InputType, str] ProcessingStepNameByInputTypeAndEndpoint = Mapping[str, ProcessingStepNameByInputType] @dataclass(frozen=True) class EndpointConfig: processing_step_name_by_input_type_and_endpoint: ProcessingStepNameByInputTypeAndEndpoint = field(default_factory=lambda : {'/splits': {'dataset': 'dataset-split-names', 'config': 'config-split-names'}, '/first-rows': {'split': 'split-first-rows'}, '/parquet': {'dataset': 'dataset-parquet', 'config': 'config-parquet'}, '/info': {'dataset': 'dataset-info', 'config': 'config-info'}, '/size': {'dataset': 'dataset-size', 'config': 'config-size'}, '/opt-in-out-urls': {'dataset': 'dataset-opt-in-out-urls-count', 'config': 'config-opt-in-out-urls-count', 'split': 'split-opt-in-out-urls-count'}, '/presidio-entities': {'dataset': 'dataset-presidio-entities-count'}, '/is-valid': {'dataset': 'dataset-is-valid', 'config': 'config-is-valid', 'split': 'split-is-valid'}, '/statistics': {'split': 'split-descriptive-statistics'}, '/compatible-libraries': {'dataset': 
'dataset-compatible-libraries'}, '/croissant-crumbs': {'dataset': 'dataset-croissant-crumbs'}, '/hub-cache': {'dataset': 'dataset-hub-cache'}}) @classmethod def from_env(cls) -> 'EndpointConfig': return cls() # File: dataset-viewer-main/services/api/src/api/routes/endpoint.py import logging from collections.abc import Mapping from http import HTTPStatus from typing import Optional, TypedDict from libapi.authentication import auth_check from libapi.exceptions import ApiError, MissingRequiredParameterError, UnexpectedApiError from libapi.request import get_request_parameter from libapi.utils import Endpoint, are_valid_parameters, get_cache_entry_from_step, get_json_api_error_response, get_json_error_response, get_json_ok_response from libcommon.croissant_utils import truncate_features_from_croissant_crumbs_response from libcommon.exceptions import NotSupportedError from libcommon.processing_graph import InputType, ProcessingGraph, ProcessingStep from libcommon.prometheus import StepProfiler from libcommon.storage_client import StorageClient from starlette.requests import Request from starlette.responses import Response from api.config import EndpointConfig StepByInputType = Mapping[InputType, ProcessingStep] StepByInputTypeAndEndpoint = Mapping[str, StepByInputType] class EndpointsDefinition: step_by_input_type_and_endpoint: StepByInputTypeAndEndpoint def __init__(self, graph: ProcessingGraph, endpoint_config: EndpointConfig): self.step_by_input_type_and_endpoint = {endpoint: {input_type: graph.get_processing_step(processing_step_name) for (input_type, processing_step_name) in processing_step_name_by_input_type.items()} for (endpoint, processing_step_name_by_input_type) in endpoint_config.processing_step_name_by_input_type_and_endpoint.items()} class OptInOutUrlsCountResponse(TypedDict): urls_columns: list[str] num_opt_in_urls: int num_opt_out_urls: int num_urls: int num_scanned_rows: int has_urls_columns: bool full_scan: Optional[bool] HARD_CODED_OPT_IN_OUT_URLS = {'laion/relaion2B-en-research-safe': OptInOutUrlsCountResponse(urls_columns=['URL'], num_opt_in_urls=16, num_opt_out_urls=43913814, num_urls=2097653553, num_scanned_rows=2097693557, has_urls_columns=True, full_scan=True), 'kakaobrain/coyo-700m': OptInOutUrlsCountResponse(urls_columns=['url'], num_opt_in_urls=2, num_opt_out_urls=4691511, num_urls=746972269, num_scanned_rows=0, has_urls_columns=True, full_scan=True)} def get_input_types_by_priority(step_by_input_type: StepByInputType) -> list[InputType]: input_type_order: list[InputType] = ['split', 'config', 'dataset'] return [input_type for input_type in input_type_order if input_type in step_by_input_type] def create_endpoint(endpoint_name: str, step_by_input_type: StepByInputType, hf_endpoint: str, blocked_datasets: list[str], assets_storage_client: StorageClient, hf_token: Optional[str]=None, hf_jwt_public_keys: Optional[list[str]]=None, hf_jwt_algorithm: Optional[str]=None, external_auth_url: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, max_age_long: int=0, max_age_short: int=0, storage_clients: Optional[list[StorageClient]]=None) -> Endpoint: async def processing_step_endpoint(request: Request) -> Response: method = f'processing_step_endpoint: {endpoint_name}' revision: Optional[str] = None with StepProfiler(method=method, step='all'): try: with StepProfiler(method=method, step='validate parameters and get processing steps'): dataset = get_request_parameter(request, 'dataset') config = get_request_parameter(request, 'config') or None split = 
get_request_parameter(request, 'split') or None logging.debug(f'endpoint_name={endpoint_name!r} dataset={dataset!r} config={config!r} split={split!r}') (dataset, config, split, input_type) = validate_parameters(dataset, config, split, step_by_input_type) processing_step = step_by_input_type[input_type] full = get_request_parameter(request, 'full', default='true').lower() != 'false' with StepProfiler(method=method, step='check authentication'): await auth_check(dataset, external_auth_url=external_auth_url, request=request, hf_jwt_public_keys=hf_jwt_public_keys, hf_jwt_algorithm=hf_jwt_algorithm, hf_timeout_seconds=hf_timeout_seconds) with StepProfiler(method=method, step='get cache entry'): if endpoint_name == '/opt-in-out-urls' and input_type == 'dataset' and (dataset in HARD_CODED_OPT_IN_OUT_URLS): return get_json_ok_response(content=HARD_CODED_OPT_IN_OUT_URLS[dataset], max_age=max_age_long, revision=revision) result = get_cache_entry_from_step(processing_step_name=processing_step.name, dataset=dataset, config=config, split=split, hf_endpoint=hf_endpoint, hf_token=hf_token, blocked_datasets=blocked_datasets, hf_timeout_seconds=hf_timeout_seconds, storage_clients=storage_clients) content = result['content'] http_status = result['http_status'] error_code = result['error_code'] revision = result['dataset_git_revision'] if http_status == HTTPStatus.OK: if endpoint_name == '/first-rows' and assets_storage_client.url_preparator: with StepProfiler(method=method, step='prepare assets urls'): assets_storage_client.url_preparator.prepare_urls_in_first_rows_in_place(content, revision=revision) elif endpoint_name == '/croissant-crumbs' and (not full): with StepProfiler(method=method, step='truncate features from croissant-crumbs response'): truncate_features_from_croissant_crumbs_response(content) with StepProfiler(method=method, step='generate OK response'): return get_json_ok_response(content=content, max_age=max_age_long, revision=revision) with StepProfiler(method=method, step='generate error response'): return get_json_error_response(content=content, status_code=http_status, max_age=max_age_short, error_code=error_code, revision=revision) except Exception as e: error = e if isinstance(e, (ApiError, NotSupportedError)) else UnexpectedApiError('Unexpected error.', e) with StepProfiler(method=method, step='generate API error response'): return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision) return processing_step_endpoint def validate_parameters(dataset: str, config: Optional[str], split: Optional[str], step_by_input_type: StepByInputType) -> tuple[str, Optional[str], Optional[str], InputType]: input_types = get_input_types_by_priority(step_by_input_type=step_by_input_type) error_message = 'No processing steps supported for parameters' for input_type in input_types: if input_type == 'split': if are_valid_parameters([dataset, config, split]): return (dataset, config, split, input_type) else: error_message = "Parameters 'dataset', 'config' and 'split' are required" elif input_type == 'config': if are_valid_parameters([dataset, config]): return (dataset, config, None, input_type) else: error_message = "Parameters 'dataset' and 'config' are required" elif input_type == 'dataset': if are_valid_parameters([dataset]): return (dataset, None, None, input_type) else: error_message = "Parameter 'dataset' is required" raise MissingRequiredParameterError(error_message) # File: dataset-viewer-main/services/rows/src/rows/app.py import uvicorn from libapi.config import 
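`validate_parameters` above resolves which processing step to serve by preferring the most specific input type the endpoint supports ('split', then 'config', then 'dataset') that the request fully specifies. A compact standalone sketch of that priority rule; the function name and error message are simplified stand-ins.

```python
# Minimal sketch of the parameter-priority rule implemented by validate_parameters above.
from typing import Optional

def pick_input_type(dataset: str, config: Optional[str], split: Optional[str],
                    supported: set[str]) -> str:
    if "split" in supported and dataset and config and split:
        return "split"
    if "config" in supported and dataset and config:
        return "config"
    if "dataset" in supported and dataset:
        return "dataset"
    raise ValueError("Missing required parameters for this endpoint")

# an endpoint like /is-valid supports all three input types, so the result
# depends on how much the caller provides
print(pick_input_type("user/ds", "default", "train", {"dataset", "config", "split"}))  # split
print(pick_input_type("user/ds", None, None, {"dataset", "config", "split"}))          # dataset
```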
UvicornConfig from libapi.jwt_token import get_jwt_public_keys from libapi.routes.healthcheck import healthcheck_endpoint from libapi.routes.metrics import create_metrics_endpoint from libapi.utils import EXPOSED_HEADERS from libcommon.cloudfront import get_cloudfront_signer from libcommon.log import init_logging from libcommon.resources import CacheMongoResource, QueueMongoResource, Resource from libcommon.storage import exists, init_parquet_metadata_dir from libcommon.storage_client import StorageClient from libcommon.url_preparator import URLPreparator from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.cors import CORSMiddleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route from starlette_prometheus import PrometheusMiddleware from rows.config import AppConfig from rows.routes.rows import create_rows_endpoint def create_app() -> Starlette: app_config = AppConfig.from_env() return create_app_with_config(app_config=app_config) def create_app_with_config(app_config: AppConfig) -> Starlette: init_logging(level=app_config.log.level) parquet_metadata_directory = init_parquet_metadata_dir(directory=app_config.parquet_metadata.storage_directory) if not exists(parquet_metadata_directory): raise RuntimeError('The parquet metadata storage directory could not be accessed. Exiting.') hf_jwt_public_keys = get_jwt_public_keys(algorithm_name=app_config.api.hf_jwt_algorithm, public_key_url=app_config.api.hf_jwt_public_key_url, additional_public_keys=app_config.api.hf_jwt_additional_public_keys, timeout_seconds=app_config.api.hf_timeout_seconds) middleware = [Middleware(CORSMiddleware, allow_origins=['*'], allow_methods=['*'], allow_headers=['*'], allow_credentials=True, expose_headers=EXPOSED_HEADERS), Middleware(GZipMiddleware), Middleware(PrometheusMiddleware, filter_unhandled_paths=True)] cache_resource = CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) queue_resource = QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) url_signer = get_cloudfront_signer(cloudfront_config=app_config.cloudfront) url_preparator = URLPreparator(url_signer=url_signer) cached_assets_storage_client = StorageClient(protocol=app_config.cached_assets.storage_protocol, storage_root=app_config.cached_assets.storage_root, base_url=app_config.cached_assets.base_url, s3_config=app_config.s3, url_preparator=url_preparator) assets_storage_client = StorageClient(protocol=app_config.assets.storage_protocol, storage_root=app_config.assets.storage_root, base_url=app_config.assets.base_url, s3_config=app_config.s3) storage_clients = [cached_assets_storage_client, assets_storage_client] resources: list[Resource] = [cache_resource, queue_resource] if not cache_resource.is_available(): raise RuntimeError('The connection to the cache database could not be established. Exiting.') if not queue_resource.is_available(): raise RuntimeError('The connection to the queue database could not be established. 
Exiting.') routes = [Route('/healthcheck', endpoint=healthcheck_endpoint), Route('/metrics', endpoint=create_metrics_endpoint()), Route('/rows', endpoint=create_rows_endpoint(cached_assets_storage_client=cached_assets_storage_client, parquet_metadata_directory=parquet_metadata_directory, max_arrow_data_in_memory=app_config.rows_index.max_arrow_data_in_memory, hf_endpoint=app_config.common.hf_endpoint, hf_token=app_config.common.hf_token, blocked_datasets=app_config.common.blocked_datasets, hf_jwt_public_keys=hf_jwt_public_keys, hf_jwt_algorithm=app_config.api.hf_jwt_algorithm, external_auth_url=app_config.api.external_auth_url, hf_timeout_seconds=app_config.api.hf_timeout_seconds, max_age_long=app_config.api.max_age_long, max_age_short=app_config.api.max_age_short, storage_clients=storage_clients))] return Starlette(routes=routes, middleware=middleware, on_shutdown=[resource.release for resource in resources]) def start() -> None: uvicorn_config = UvicornConfig.from_env() uvicorn.run('app:create_app', host=uvicorn_config.hostname, port=uvicorn_config.port, factory=True, workers=uvicorn_config.num_workers) # File: dataset-viewer-main/services/rows/src/rows/config.py from dataclasses import dataclass, field from libapi.config import ApiConfig from libcommon.config import AssetsConfig, CacheConfig, CachedAssetsConfig, CloudFrontConfig, CommonConfig, LogConfig, ParquetMetadataConfig, QueueConfig, RowsIndexConfig, S3Config @dataclass(frozen=True) class AppConfig: api: ApiConfig = field(default_factory=ApiConfig) assets: AssetsConfig = field(default_factory=AssetsConfig) cache: CacheConfig = field(default_factory=CacheConfig) cached_assets: CachedAssetsConfig = field(default_factory=CachedAssetsConfig) cloudfront: CloudFrontConfig = field(default_factory=CloudFrontConfig) common: CommonConfig = field(default_factory=CommonConfig) log: LogConfig = field(default_factory=LogConfig) parquet_metadata: ParquetMetadataConfig = field(default_factory=ParquetMetadataConfig) queue: QueueConfig = field(default_factory=QueueConfig) rows_index: RowsIndexConfig = field(default_factory=RowsIndexConfig) s3: S3Config = field(default_factory=S3Config) @classmethod def from_env(cls) -> 'AppConfig': common_config = CommonConfig.from_env() return cls(api=ApiConfig.from_env(hf_endpoint=common_config.hf_endpoint), assets=AssetsConfig.from_env(), cache=CacheConfig.from_env(), cached_assets=CachedAssetsConfig.from_env(), cloudfront=CloudFrontConfig.from_env(), common=common_config, log=LogConfig.from_env(), parquet_metadata=ParquetMetadataConfig.from_env(), queue=QueueConfig.from_env(), rows_index=RowsIndexConfig.from_env(), s3=S3Config.from_env()) # File: dataset-viewer-main/services/rows/src/rows/routes/rows.py import logging from typing import Literal, Optional, Union from fsspec.implementations.http import HTTPFileSystem from libapi.authentication import auth_check from libapi.exceptions import ApiError, TooBigContentError, UnexpectedApiError from libapi.request import get_request_parameter, get_request_parameter_length, get_request_parameter_offset from libapi.response import create_response from libapi.utils import Endpoint, get_json_api_error_response, get_json_error_response, get_json_ok_response, try_backfill_dataset_then_raise from libcommon.constants import CONFIG_PARQUET_METADATA_KIND from libcommon.parquet_utils import Indexer, TooBigRows from libcommon.prometheus import StepProfiler from libcommon.simple_cache import CachedArtifactError, CachedArtifactNotFoundError from libcommon.storage import StrPath from 
libcommon.storage_client import StorageClient from libcommon.viewer_utils.features import UNSUPPORTED_FEATURES from starlette.requests import Request from starlette.responses import Response logger = logging.getLogger(__name__) ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST: Union[Literal['all'], list[str]] = ['halabi2016/arabic_speech_corpus'] def create_rows_endpoint(cached_assets_storage_client: StorageClient, parquet_metadata_directory: StrPath, max_arrow_data_in_memory: int, hf_endpoint: str, blocked_datasets: list[str], hf_token: Optional[str]=None, hf_jwt_public_keys: Optional[list[str]]=None, hf_jwt_algorithm: Optional[str]=None, external_auth_url: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, max_age_long: int=0, max_age_short: int=0, storage_clients: Optional[list[StorageClient]]=None) -> Endpoint: indexer = Indexer(hf_token=hf_token, parquet_metadata_directory=parquet_metadata_directory, httpfs=HTTPFileSystem(headers={'authorization': f'Bearer {hf_token}'}), max_arrow_data_in_memory=max_arrow_data_in_memory, unsupported_features=UNSUPPORTED_FEATURES, all_columns_supported_datasets_allow_list=ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST) async def rows_endpoint(request: Request) -> Response: await indexer.httpfs.set_session() revision: Optional[str] = None with StepProfiler(method='rows_endpoint', step='all'): try: with StepProfiler(method='rows_endpoint', step='validate parameters'): dataset = get_request_parameter(request, 'dataset', required=True) config = get_request_parameter(request, 'config', required=True) split = get_request_parameter(request, 'split', required=True) offset = get_request_parameter_offset(request) length = get_request_parameter_length(request) logging.info(f'/rows, dataset={dataset!r}, config={config!r}, split={split!r}, offset={offset!r}, length={length!r}') with StepProfiler(method='rows_endpoint', step='check authentication'): await auth_check(dataset=dataset, external_auth_url=external_auth_url, request=request, hf_jwt_public_keys=hf_jwt_public_keys, hf_jwt_algorithm=hf_jwt_algorithm, hf_timeout_seconds=hf_timeout_seconds) try: with StepProfiler(method='rows_endpoint', step='get row groups index'): rows_index = indexer.get_rows_index(dataset=dataset, config=config, split=split) revision = rows_index.revision with StepProfiler(method='rows_endpoint', step='query the rows'): try: truncated_columns: list[str] = [] if dataset == 'Major-TOM/Core-S2L2A': (pa_table, truncated_columns) = rows_index.query_truncated_binary(offset=offset, length=length) else: pa_table = rows_index.query(offset=offset, length=length) except TooBigRows as err: raise TooBigContentError(str(err)) from None with StepProfiler(method='rows_endpoint', step='transform to a list'): response = await create_response(dataset=dataset, revision=revision, config=config, split=split, storage_client=cached_assets_storage_client, pa_table=pa_table, offset=offset, features=rows_index.parquet_index.features, unsupported_columns=rows_index.parquet_index.unsupported_columns, partial=rows_index.parquet_index.partial, num_rows_total=rows_index.parquet_index.num_rows_total, truncated_columns=truncated_columns) except CachedArtifactNotFoundError: with StepProfiler(method='rows_endpoint', step='try backfill dataset'): try_backfill_dataset_then_raise(processing_step_name=CONFIG_PARQUET_METADATA_KIND, dataset=dataset, hf_endpoint=hf_endpoint, hf_timeout_seconds=hf_timeout_seconds, hf_token=hf_token, blocked_datasets=blocked_datasets, storage_clients=storage_clients) with 
StepProfiler(method='rows_endpoint', step='generate the OK response'): return get_json_ok_response(content=response, max_age=max_age_long, revision=revision) except CachedArtifactError as e: content = e.cache_entry_with_details['content'] http_status = e.cache_entry_with_details['http_status'] error_code = e.cache_entry_with_details['error_code'] return get_json_error_response(content=content, status_code=http_status, max_age=max_age_short, error_code=error_code, revision=revision) except Exception as e: error = e if isinstance(e, ApiError) else UnexpectedApiError('Unexpected error.', e) with StepProfiler(method='rows_endpoint', step='generate API error response'): return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision) return rows_endpoint # File: dataset-viewer-main/services/search/src/search/app.py import uvicorn from libapi.config import UvicornConfig from libapi.jwt_token import get_jwt_public_keys from libapi.routes.healthcheck import healthcheck_endpoint from libapi.routes.metrics import create_metrics_endpoint from libapi.utils import EXPOSED_HEADERS from libcommon.cloudfront import get_cloudfront_signer from libcommon.log import init_logging from libcommon.resources import CacheMongoResource, QueueMongoResource, Resource from libcommon.storage import exists, init_duckdb_index_cache_dir from libcommon.storage_client import StorageClient from libcommon.url_preparator import URLPreparator from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.cors import CORSMiddleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route from starlette_prometheus import PrometheusMiddleware from search.config import AppConfig from search.routes.filter import create_filter_endpoint from search.routes.search import create_search_endpoint def create_app() -> Starlette: app_config = AppConfig.from_env() return create_app_with_config(app_config=app_config) def create_app_with_config(app_config: AppConfig) -> Starlette: init_logging(level=app_config.log.level) duckdb_index_cache_directory = init_duckdb_index_cache_dir(directory=app_config.duckdb_index.cache_directory) if not exists(duckdb_index_cache_directory): raise RuntimeError('The duckdb_index cache directory could not be accessed. 
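# An illustrative client for the /rows endpoint above (the base URL and dataset name are
# placeholders; the query parameters match the ones the endpoint validates):
import requests

ROWS_API = 'http://localhost:8082'  # assumption: wherever the rows service is exposed

def fetch_rows(dataset: str, config: str, split: str, offset: int = 0, length: int = 100) -> dict:
    response = requests.get(
        f'{ROWS_API}/rows',
        params={'dataset': dataset, 'config': config, 'split': split, 'offset': offset, 'length': length},
        timeout=30,
    )
    response.raise_for_status()  # the service returns a JSON error payload on non-200 responses
    return response.json()

# fetch_rows('user/some_dataset', 'default', 'train', offset=0, length=10)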
Exiting.') hf_jwt_public_keys = get_jwt_public_keys(algorithm_name=app_config.api.hf_jwt_algorithm, public_key_url=app_config.api.hf_jwt_public_key_url, additional_public_keys=app_config.api.hf_jwt_additional_public_keys, timeout_seconds=app_config.api.hf_timeout_seconds) middleware = [Middleware(CORSMiddleware, allow_origins=['*'], allow_methods=['*'], allow_headers=['*'], allow_credentials=True, expose_headers=EXPOSED_HEADERS), Middleware(GZipMiddleware), Middleware(PrometheusMiddleware, filter_unhandled_paths=True)] cache_resource = CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) queue_resource = QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) url_signer = get_cloudfront_signer(cloudfront_config=app_config.cloudfront) url_preparator = URLPreparator(url_signer=url_signer) cached_assets_storage_client = StorageClient(protocol=app_config.cached_assets.storage_protocol, storage_root=app_config.cached_assets.storage_root, base_url=app_config.cached_assets.base_url, s3_config=app_config.s3, url_preparator=url_preparator) assets_storage_client = StorageClient(protocol=app_config.assets.storage_protocol, storage_root=app_config.assets.storage_root, base_url=app_config.assets.base_url, s3_config=app_config.s3) storage_clients = [cached_assets_storage_client, assets_storage_client] resources: list[Resource] = [cache_resource, queue_resource] if not cache_resource.is_available(): raise RuntimeError('The connection to the cache database could not be established. Exiting.') if not queue_resource.is_available(): raise RuntimeError('The connection to the queue database could not be established. Exiting.') routes = [Route('/healthcheck', endpoint=healthcheck_endpoint), Route('/metrics', endpoint=create_metrics_endpoint()), Route('/search', endpoint=create_search_endpoint(duckdb_index_file_directory=duckdb_index_cache_directory, cached_assets_storage_client=cached_assets_storage_client, target_revision=app_config.duckdb_index.target_revision, hf_endpoint=app_config.common.hf_endpoint, hf_token=app_config.common.hf_token, blocked_datasets=app_config.common.blocked_datasets, hf_jwt_public_keys=hf_jwt_public_keys, hf_jwt_algorithm=app_config.api.hf_jwt_algorithm, external_auth_url=app_config.api.external_auth_url, hf_timeout_seconds=app_config.api.hf_timeout_seconds, max_age_long=app_config.api.max_age_long, max_age_short=app_config.api.max_age_short, storage_clients=storage_clients, extensions_directory=app_config.duckdb_index.extensions_directory, clean_cache_proba=app_config.duckdb_index.clean_cache_proba, expiredTimeIntervalSeconds=app_config.duckdb_index.expired_time_interval_seconds)), Route('/filter', endpoint=create_filter_endpoint(duckdb_index_file_directory=duckdb_index_cache_directory, target_revision=app_config.duckdb_index.target_revision, cached_assets_storage_client=cached_assets_storage_client, hf_endpoint=app_config.common.hf_endpoint, hf_token=app_config.common.hf_token, blocked_datasets=app_config.common.blocked_datasets, hf_jwt_public_keys=hf_jwt_public_keys, hf_jwt_algorithm=app_config.api.hf_jwt_algorithm, external_auth_url=app_config.api.external_auth_url, hf_timeout_seconds=app_config.api.hf_timeout_seconds, max_age_long=app_config.api.max_age_long, max_age_short=app_config.api.max_age_short, storage_clients=storage_clients, extensions_directory=app_config.duckdb_index.extensions_directory, clean_cache_proba=app_config.duckdb_index.clean_cache_proba, 
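# An illustrative sketch of the middleware stack assembled above, attached to a bare
# Starlette app (EXPOSED_HEADERS comes from libapi, so a placeholder list is used here):
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.responses import JSONResponse
from starlette.routing import Route
from starlette_prometheus import PrometheusMiddleware

async def healthcheck(request):
    return JSONResponse({'status': 'ok'})

middleware = [
    Middleware(CORSMiddleware, allow_origins=['*'], allow_methods=['*'], allow_headers=['*'],
               allow_credentials=True, expose_headers=['X-Error-Code']),  # placeholder for EXPOSED_HEADERS
    Middleware(GZipMiddleware),
    Middleware(PrometheusMiddleware, filter_unhandled_paths=True),  # records request metrics
]

app = Starlette(routes=[Route('/healthcheck', endpoint=healthcheck)], middleware=middleware)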
expiredTimeIntervalSeconds=app_config.duckdb_index.expired_time_interval_seconds))] return Starlette(routes=routes, middleware=middleware, on_shutdown=[resource.release for resource in resources]) def start() -> None: uvicorn_config = UvicornConfig.from_env() uvicorn.run('app:create_app', host=uvicorn_config.hostname, port=uvicorn_config.port, factory=True, workers=uvicorn_config.num_workers) # File: dataset-viewer-main/services/search/src/search/config.py from dataclasses import dataclass, field from typing import Optional from environs import Env from libapi.config import ApiConfig from libcommon.config import AssetsConfig, CacheConfig, CachedAssetsConfig, CloudFrontConfig, CommonConfig, LogConfig, QueueConfig, S3Config DUCKDB_INDEX_CACHE_DIRECTORY = None DUCKDB_INDEX_CACHE_CLEAN_CACHE_PROBA = 0.05 DUCKDB_INDEX_CACHE_EXPIRED_TIME_INTERVAL_SECONDS = 3600 DUCKDB_INDEX_TARGET_REVISION = 'refs/convert/duckdb' DUCKDB_INDEX_EXTENSIONS_DIRECTORY: Optional[str] = None @dataclass(frozen=True) class DuckDbIndexConfig: cache_directory: Optional[str] = DUCKDB_INDEX_CACHE_DIRECTORY clean_cache_proba: float = DUCKDB_INDEX_CACHE_CLEAN_CACHE_PROBA expired_time_interval_seconds: int = DUCKDB_INDEX_CACHE_EXPIRED_TIME_INTERVAL_SECONDS target_revision: str = DUCKDB_INDEX_TARGET_REVISION extensions_directory: Optional[str] = DUCKDB_INDEX_EXTENSIONS_DIRECTORY @classmethod def from_env(cls) -> 'DuckDbIndexConfig': env = Env(expand_vars=True) with env.prefixed('DUCKDB_INDEX_'): return cls(cache_directory=env.str(name='CACHE_DIRECTORY', default=DUCKDB_INDEX_CACHE_DIRECTORY), clean_cache_proba=env.float(name='CACHE_CLEAN_CACHE_PROBA', default=DUCKDB_INDEX_CACHE_CLEAN_CACHE_PROBA), expired_time_interval_seconds=env.int(name='CACHE_EXPIRED_TIME_INTERVAL_SECONDS', default=DUCKDB_INDEX_CACHE_EXPIRED_TIME_INTERVAL_SECONDS), target_revision=env.str(name='TARGET_REVISION', default=DUCKDB_INDEX_TARGET_REVISION), extensions_directory=env.str(name='EXTENSIONS_DIRECTORY', default=DUCKDB_INDEX_EXTENSIONS_DIRECTORY)) @dataclass(frozen=True) class AppConfig: api: ApiConfig = field(default_factory=ApiConfig) assets: AssetsConfig = field(default_factory=AssetsConfig) cache: CacheConfig = field(default_factory=CacheConfig) cached_assets: CachedAssetsConfig = field(default_factory=CachedAssetsConfig) cloudfront: CloudFrontConfig = field(default_factory=CloudFrontConfig) common: CommonConfig = field(default_factory=CommonConfig) log: LogConfig = field(default_factory=LogConfig) queue: QueueConfig = field(default_factory=QueueConfig) duckdb_index: DuckDbIndexConfig = field(default_factory=DuckDbIndexConfig) s3: S3Config = field(default_factory=S3Config) @classmethod def from_env(cls) -> 'AppConfig': common_config = CommonConfig.from_env() return cls(common=common_config, assets=AssetsConfig.from_env(), cache=CacheConfig.from_env(), cached_assets=CachedAssetsConfig.from_env(), cloudfront=CloudFrontConfig.from_env(), log=LogConfig.from_env(), queue=QueueConfig.from_env(), api=ApiConfig.from_env(hf_endpoint=common_config.hf_endpoint), duckdb_index=DuckDbIndexConfig.from_env(), s3=S3Config.from_env()) # File: dataset-viewer-main/services/search/src/search/duckdb_connection.py from typing import Any, Optional import duckdb LOAD_FTS_SAFE_COMMAND = "INSTALL 'fts'; LOAD 'fts'; SET enable_external_access=false; SET lock_configuration=true;" SET_EXTENSIONS_DIRECTORY_COMMAND = "SET extension_directory='{directory}';" def duckdb_connect(extensions_directory: Optional[str]=None, **kwargs: Any) -> duckdb.DuckDBPyConnection: con = 
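# An illustrative sketch of how the DUCKDB_INDEX_* variables above reach DuckDbIndexConfig:
# environs strips the prefix inside env.prefixed(), so exporting DUCKDB_INDEX_TARGET_REVISION
# overrides the 'refs/convert/duckdb' default (the override value below is just for the demo).
import os
from environs import Env

os.environ['DUCKDB_INDEX_TARGET_REVISION'] = 'refs/convert/duckdb-test'

env = Env(expand_vars=True)
with env.prefixed('DUCKDB_INDEX_'):
    target_revision = env.str(name='TARGET_REVISION', default='refs/convert/duckdb')
    clean_cache_proba = env.float(name='CACHE_CLEAN_CACHE_PROBA', default=0.05)

print(target_revision, clean_cache_proba)  # refs/convert/duckdb-test 0.05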
duckdb.connect(read_only=True, **kwargs) if extensions_directory is not None: con.execute(SET_EXTENSIONS_DIRECTORY_COMMAND.format(directory=extensions_directory)) con.sql(LOAD_FTS_SAFE_COMMAND) return con # File: dataset-viewer-main/services/search/src/search/routes/filter.py import logging import random import re from http import HTTPStatus from typing import Optional import anyio import duckdb import pyarrow as pa from datasets import Features from libapi.authentication import auth_check from libapi.duckdb import get_cache_entry_from_duckdb_index_job, get_index_file_location_and_download_if_missing from libapi.exceptions import ApiError, InvalidParameterError, UnexpectedApiError from libapi.request import get_request_parameter, get_request_parameter_length, get_request_parameter_offset from libapi.response import create_response from libapi.utils import Endpoint, get_json_api_error_response, get_json_error_response, get_json_ok_response from libcommon.duckdb_utils import duckdb_index_is_partial from libcommon.prometheus import StepProfiler from libcommon.storage import StrPath, clean_dir from libcommon.storage_client import StorageClient from libcommon.viewer_utils.features import get_supported_unsupported_columns from starlette.requests import Request from starlette.responses import Response from search.duckdb_connection import duckdb_connect FILTER_QUERY = ' SELECT {columns}\n FROM data\n {where}\n {orderby}\n LIMIT {limit}\n OFFSET {offset}' FILTER_COUNT_QUERY = ' SELECT COUNT(*)\n FROM data\n {where}' SQL_INVALID_SYMBOLS = '|'.join([';', '--', '/\\*', '\\*/']) SQL_INVALID_SYMBOLS_PATTERN = re.compile(f'(?:{SQL_INVALID_SYMBOLS})', flags=re.IGNORECASE) logger = logging.getLogger(__name__) def create_filter_endpoint(duckdb_index_file_directory: StrPath, target_revision: str, cached_assets_storage_client: StorageClient, blocked_datasets: list[str], hf_endpoint: str, hf_token: Optional[str]=None, hf_jwt_public_keys: Optional[list[str]]=None, hf_jwt_algorithm: Optional[str]=None, external_auth_url: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, max_age_long: int=0, max_age_short: int=0, storage_clients: Optional[list[StorageClient]]=None, extensions_directory: Optional[str]=None, clean_cache_proba: float=0.0, expiredTimeIntervalSeconds: int=60) -> Endpoint: async def filter_endpoint(request: Request) -> Response: revision: Optional[str] = None with StepProfiler(method='filter_endpoint', step='all'): try: with StepProfiler(method='filter_endpoint', step='validate parameters'): dataset = get_request_parameter(request, 'dataset', required=True) config = get_request_parameter(request, 'config', required=True) split = get_request_parameter(request, 'split', required=True) where = get_request_parameter(request, 'where') validate_query_parameter(where, 'where') orderby = get_request_parameter(request, 'orderby') validate_query_parameter(orderby, 'orderby') offset = get_request_parameter_offset(request) length = get_request_parameter_length(request) logger.info(f'/filter, dataset={dataset!r}, config={config!r}, split={split!r}, where={where!r}, orderby={orderby!r}, offset={offset!r}, length={length!r}') with StepProfiler(method='filter_endpoint', step='check authentication'): await auth_check(dataset=dataset, external_auth_url=external_auth_url, request=request, hf_jwt_public_keys=hf_jwt_public_keys, hf_jwt_algorithm=hf_jwt_algorithm, hf_timeout_seconds=hf_timeout_seconds) with StepProfiler(method='filter_endpoint', step='validate indexing was done'): duckdb_index_cache_entry = 
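# An illustrative sketch spelling out what duckdb_connect above does against duckdb directly:
# open the index file read-only, optionally point at a local extension directory, load the
# FTS extension, then lock the configuration so later queries cannot re-enable external
# access. The index file path is a placeholder, and `data` is the table name the FILTER_QUERY
# template expects.
import duckdb

INDEX_FILE = '/tmp/index.duckdb'  # assumption: a previously downloaded duckdb index file

with duckdb.connect(INDEX_FILE, read_only=True) as con:
    # con.execute("SET extension_directory='/tmp/duckdb-extensions';")  # only if extensions_directory is set
    con.sql("INSTALL 'fts'; LOAD 'fts'; SET enable_external_access=false; SET lock_configuration=true;")
    print(con.sql('SELECT COUNT(*) FROM data').fetchall())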
get_cache_entry_from_duckdb_index_job(dataset=dataset, config=config, split=split, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds, blocked_datasets=blocked_datasets, storage_clients=storage_clients) revision = duckdb_index_cache_entry['dataset_git_revision'] if duckdb_index_cache_entry['http_status'] != HTTPStatus.OK: return get_json_error_response(content=duckdb_index_cache_entry['content'], status_code=duckdb_index_cache_entry['http_status'], max_age=max_age_short, error_code=duckdb_index_cache_entry['error_code'], revision=revision) url = duckdb_index_cache_entry['content']['url'] filename = duckdb_index_cache_entry['content']['filename'] index_size = duckdb_index_cache_entry['content']['size'] partial = duckdb_index_is_partial(url) with StepProfiler(method='filter_endpoint', step='download index file if missing'): index_file_location = await get_index_file_location_and_download_if_missing(duckdb_index_file_directory=duckdb_index_file_directory, dataset=dataset, config=config, split=split, revision=revision, filename=filename, size_bytes=index_size, url=url, target_revision=target_revision, hf_token=hf_token) with StepProfiler(method='filter_endpoint', step='get features'): features = Features.from_dict(duckdb_index_cache_entry['content']['features']) with StepProfiler(method='filter_endpoint', step='get supported and unsupported columns'): (supported_columns, unsupported_columns) = get_supported_unsupported_columns(features) with StepProfiler(method='filter_endpoint', step='execute filter query'): (num_rows_total, pa_table) = await anyio.to_thread.run_sync(execute_filter_query, index_file_location, supported_columns, where, orderby, length, offset, extensions_directory) if random.random() < clean_cache_proba: with StepProfiler(method='filter_endpoint', step='clean old indexes'): clean_dir(duckdb_index_file_directory, expiredTimeIntervalSeconds) with StepProfiler(method='filter_endpoint', step='create response'): response = await create_response(dataset=dataset, revision=revision, config=config, split=split, storage_client=cached_assets_storage_client, pa_table=pa_table, offset=offset, features=features or Features.from_arrow_schema(pa_table.schema), unsupported_columns=unsupported_columns, num_rows_total=num_rows_total, partial=partial, use_row_idx_column=True) with StepProfiler(method='filter_endpoint', step='generate the OK response'): return get_json_ok_response(content=response, max_age=max_age_long, revision=revision) except Exception as e: error = e if isinstance(e, ApiError) else UnexpectedApiError('Unexpected error.', e) with StepProfiler(method='filter_endpoint', step='generate API error response'): return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision) return filter_endpoint def execute_filter_query(index_file_location: str, columns: list[str], where: str, orderby: str, limit: int, offset: int, extensions_directory: Optional[str]=None) -> tuple[int, pa.Table]: with duckdb_connect(extensions_directory=extensions_directory, database=index_file_location) as con: filter_query = FILTER_QUERY.format(columns=','.join([f'"{column}"' for column in columns]), where=f'WHERE {where}' if where else '', orderby=f'ORDER BY {orderby}' if orderby else '', limit=limit, offset=offset) filter_count_query = FILTER_COUNT_QUERY.format(where=f'WHERE {where}' if where else '') try: pa_table = con.sql(filter_query).arrow() num_rows_total = con.sql(filter_count_query).fetchall()[0][0] except duckdb.Error as err: raise 
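# An illustrative, standalone restatement of the where/orderby validation and FILTER_QUERY
# templating used by the /filter endpoint above (ValueError stands in for libapi's
# InvalidParameterError; the column names and clauses are made up for the demo):
import re

SQL_INVALID_SYMBOLS = '|'.join([';', '--', '/\\*', '\\*/'])
SQL_INVALID_SYMBOLS_PATTERN = re.compile(f'(?:{SQL_INVALID_SYMBOLS})', flags=re.IGNORECASE)
FILTER_QUERY = 'SELECT {columns} FROM data {where} {orderby} LIMIT {limit} OFFSET {offset}'

def validate_query_parameter(parameter_value: str, parameter_name: str) -> None:
    # rejects statement separators and SQL comments before the value is interpolated
    if SQL_INVALID_SYMBOLS_PATTERN.search(parameter_value):
        raise ValueError(f"Parameter '{parameter_name}' contains invalid symbols")

where = "score > 0.5 AND lang = 'en'"
orderby = 'score DESC'
validate_query_parameter(where, 'where')
validate_query_parameter(orderby, 'orderby')
# validate_query_parameter('1=1; DROP TABLE data', 'where')  # would raise: ';' is blocked

print(FILTER_QUERY.format(
    columns=','.join(f'"{column}"' for column in ['text', 'score']),
    where=f'WHERE {where}',
    orderby=f'ORDER BY {orderby}',
    limit=10,
    offset=0,
))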
InvalidParameterError(message='A query parameter is invalid') from err return (num_rows_total, pa_table) def validate_query_parameter(parameter_value: str, parameter_name: str) -> None: if SQL_INVALID_SYMBOLS_PATTERN.search(parameter_value): raise InvalidParameterError(message=f"Parameter '{parameter_name}' contains invalid symbols") # File: dataset-viewer-main/services/search/src/search/routes/search.py import logging import random from http import HTTPStatus from typing import Optional import anyio import pyarrow as pa from datasets import Features from libapi.authentication import auth_check from libapi.duckdb import get_cache_entry_from_duckdb_index_job, get_index_file_location_and_download_if_missing from libapi.exceptions import ApiError, SearchFeatureNotAvailableError, UnexpectedApiError from libapi.request import get_request_parameter, get_request_parameter_length, get_request_parameter_offset from libapi.utils import Endpoint, get_json_api_error_response, get_json_error_response, get_json_ok_response, to_rows_list from libcommon.constants import HF_FTS_SCORE, MAX_NUM_ROWS_PER_PAGE, ROW_IDX_COLUMN from libcommon.dtos import PaginatedResponse from libcommon.duckdb_utils import duckdb_index_is_partial from libcommon.prometheus import StepProfiler from libcommon.storage import StrPath, clean_dir from libcommon.storage_client import StorageClient from libcommon.viewer_utils.features import get_supported_unsupported_columns, to_features_list from starlette.requests import Request from starlette.responses import Response from search.duckdb_connection import duckdb_connect logger = logging.getLogger(__name__) FTS_STAGE_TABLE_COMMAND = f'SELECT * FROM (SELECT {ROW_IDX_COLUMN}, fts_main_data.match_bm25({ROW_IDX_COLUMN}, ?) AS {HF_FTS_SCORE} FROM data) A WHERE {HF_FTS_SCORE} IS NOT NULL;' JOIN_STAGE_AND_DATA_COMMAND = 'SELECT {columns} FROM fts_stage_table JOIN data USING({row_idx_column}) ORDER BY fts_stage_table.{hf_fts_score} DESC;' def full_text_search(index_file_location: str, columns: list[str], query: str, offset: int, length: int, extensions_directory: Optional[str]=None) -> tuple[int, pa.Table]: with duckdb_connect(extensions_directory=extensions_directory, database=index_file_location) as con: fts_stage_table = con.execute(query=FTS_STAGE_TABLE_COMMAND, parameters=[query]).arrow() num_rows_total = fts_stage_table.num_rows logging.info(f'got num_rows_total={num_rows_total!r} results for query={query!r} using offset={offset!r} length={length!r}') fts_stage_table = fts_stage_table.sort_by([(HF_FTS_SCORE, 'descending')]).slice(offset, length) join_stage_and_data_query = JOIN_STAGE_AND_DATA_COMMAND.format(columns=','.join([f'"{column}"' for column in columns]), row_idx_column=ROW_IDX_COLUMN, hf_fts_score=HF_FTS_SCORE) pa_table = con.execute(query=join_stage_and_data_query).arrow() return (num_rows_total, pa_table) async def create_response(pa_table: pa.Table, dataset: str, revision: str, config: str, split: str, storage_client: StorageClient, offset: int, features: Features, unsupported_columns: list[str], num_rows_total: int, partial: bool) -> PaginatedResponse: features_without_key = features.copy() features_without_key.pop(ROW_IDX_COLUMN, None) pa_table = pa_table.drop(unsupported_columns) logging.info(f'create response for dataset={dataset!r} config={config!r} split={split!r}') return PaginatedResponse(features=to_features_list(features_without_key), rows=await to_rows_list(pa_table=pa_table, dataset=dataset, revision=revision, config=config, split=split, 
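# An illustrative, self-contained version of the two-stage full-text search above, run
# against a tiny in-memory DuckDB table ('row_idx' stands in for libcommon's ROW_IDX_COLUMN
# constant, and 'score' for HF_FTS_SCORE):
import duckdb

con = duckdb.connect()  # in-memory and writable, just to build a demo FTS index
con.sql("INSTALL 'fts'; LOAD 'fts';")
con.sql("CREATE TABLE data AS SELECT * FROM (VALUES "
        "(0, 'the quick brown fox'), (1, 'lazy dogs sleep'), (2, 'quick tests pass')"
        ") AS t(row_idx, text)")
con.sql("PRAGMA create_fts_index('data', 'row_idx', 'text')")

# stage 1: BM25-score every row, keep the matches (mirrors FTS_STAGE_TABLE_COMMAND)
fts_stage_table = con.execute(
    "SELECT * FROM (SELECT row_idx, fts_main_data.match_bm25(row_idx, ?) AS score FROM data) s "
    "WHERE score IS NOT NULL",
    ['quick'],
).arrow()

# stage 2: join the matching row ids back to the data, best matches first
# (mirrors JOIN_STAGE_AND_DATA_COMMAND)
con.register('fts_stage_table', fts_stage_table)
print(con.execute(
    "SELECT row_idx, text, score FROM fts_stage_table JOIN data USING (row_idx) ORDER BY score DESC"
).arrow().to_pylist())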
storage_client=storage_client, offset=offset, features=features, unsupported_columns=unsupported_columns, row_idx_column=ROW_IDX_COLUMN), num_rows_total=num_rows_total, num_rows_per_page=MAX_NUM_ROWS_PER_PAGE, partial=partial) def create_search_endpoint(duckdb_index_file_directory: StrPath, cached_assets_storage_client: StorageClient, target_revision: str, hf_endpoint: str, blocked_datasets: list[str], external_auth_url: Optional[str]=None, hf_token: Optional[str]=None, hf_jwt_public_keys: Optional[list[str]]=None, hf_jwt_algorithm: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, max_age_long: int=0, max_age_short: int=0, storage_clients: Optional[list[StorageClient]]=None, extensions_directory: Optional[str]=None, clean_cache_proba: float=0.0, expiredTimeIntervalSeconds: int=60) -> Endpoint: async def search_endpoint(request: Request) -> Response: revision: Optional[str] = None with StepProfiler(method='search_endpoint', step='all'): try: with StepProfiler(method='search_endpoint', step='validate parameters'): dataset = get_request_parameter(request, 'dataset', required=True) config = get_request_parameter(request, 'config', required=True) split = get_request_parameter(request, 'split', required=True) query = get_request_parameter(request, 'query', required=True) offset = get_request_parameter_offset(request) length = get_request_parameter_length(request) with StepProfiler(method='search_endpoint', step='check authentication'): await auth_check(dataset=dataset, external_auth_url=external_auth_url, request=request, hf_jwt_public_keys=hf_jwt_public_keys, hf_jwt_algorithm=hf_jwt_algorithm, hf_timeout_seconds=hf_timeout_seconds) logging.info(f'/search dataset={dataset!r} config={config!r} split={split!r} query={query!r} offset={offset!r} length={length!r}') with StepProfiler(method='search_endpoint', step='validate indexing was done'): duckdb_index_cache_entry = get_cache_entry_from_duckdb_index_job(dataset=dataset, config=config, split=split, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds, blocked_datasets=blocked_datasets, storage_clients=storage_clients) revision = duckdb_index_cache_entry['dataset_git_revision'] if duckdb_index_cache_entry['http_status'] != HTTPStatus.OK: return get_json_error_response(content=duckdb_index_cache_entry['content'], status_code=duckdb_index_cache_entry['http_status'], max_age=max_age_short, error_code=duckdb_index_cache_entry['error_code'], revision=revision) if duckdb_index_cache_entry['content']['stemmer'] is None: raise SearchFeatureNotAvailableError('The split does not have search feature enabled.') url = duckdb_index_cache_entry['content']['url'] filename = duckdb_index_cache_entry['content']['filename'] index_size = duckdb_index_cache_entry['content']['size'] partial = duckdb_index_is_partial(url) with StepProfiler(method='search_endpoint', step='download index file if missing'): index_file_location = await get_index_file_location_and_download_if_missing(duckdb_index_file_directory=duckdb_index_file_directory, dataset=dataset, config=config, split=split, revision=revision, filename=filename, size_bytes=index_size, url=url, target_revision=target_revision, hf_token=hf_token) with StepProfiler(method='search_endpoint', step='get features'): features = Features.from_dict(duckdb_index_cache_entry['content']['features']) with StepProfiler(method='search_endpoint', step='get supported and unsupported columns'): (supported_columns, unsupported_columns) = get_supported_unsupported_columns(features) with 
StepProfiler(method='search_endpoint', step='perform FTS command'): logging.debug(f'connect to index file {index_file_location}') (num_rows_total, pa_table) = await anyio.to_thread.run_sync(full_text_search, index_file_location, supported_columns, query, offset, length, extensions_directory) if random.random() < clean_cache_proba: with StepProfiler(method='search_endpoint', step='clean old indexes'): clean_dir(duckdb_index_file_directory, expiredTimeIntervalSeconds) with StepProfiler(method='search_endpoint', step='create response'): response = await create_response(pa_table=pa_table, dataset=dataset, revision=revision, config=config, split=split, storage_client=cached_assets_storage_client, offset=offset, features=features or Features.from_arrow_schema(pa_table.schema), unsupported_columns=unsupported_columns, num_rows_total=num_rows_total, partial=partial) logging.info(f'transform rows finished for dataset={dataset!r} config={config!r} split={split!r}') with StepProfiler(method='search_endpoint', step='generate the OK response'): return get_json_ok_response(response, max_age=max_age_long, revision=revision) except Exception as e: error = e if isinstance(e, ApiError) else UnexpectedApiError('Unexpected error.', e) with StepProfiler(method='search_endpoint', step='generate API error response'): return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision) return search_endpoint # File: dataset-viewer-main/services/sse-api/src/sse_api/app.py import asyncio import uvicorn from libapi.config import UvicornConfig from libapi.routes.healthcheck import healthcheck_endpoint from libapi.routes.metrics import create_metrics_endpoint from libapi.utils import EXPOSED_HEADERS from libcommon.constants import CACHE_COLLECTION_RESPONSES from libcommon.log import init_logging from libcommon.resources import CacheMongoResource from libcommon.simple_cache import CachedResponseDocument from motor.motor_asyncio import AsyncIOMotorClient from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.cors import CORSMiddleware from starlette.routing import Route from starlette_prometheus import PrometheusMiddleware from sse_api.config import AppConfig from sse_api.routes.hub_cache import create_hub_cache_endpoint from sse_api.watcher import HubCacheWatcher def create_app() -> Starlette: app_config = AppConfig.from_env() return create_app_with_config(app_config=app_config) def create_app_with_config(app_config: AppConfig) -> Starlette: init_logging(level=app_config.log.level) with CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) as resource: if not resource.is_available(): raise Exception('MongoDB is not available') resource.create_collection(CachedResponseDocument) resource.enable_pre_and_post_images(CACHE_COLLECTION_RESPONSES) hub_cache_watcher = HubCacheWatcher(client=AsyncIOMotorClient(host=app_config.cache.mongo_url, io_loop=asyncio.get_running_loop()), db_name=app_config.cache.mongo_database, collection_name=CACHE_COLLECTION_RESPONSES) middleware = [Middleware(CORSMiddleware, allow_origins=['*'], allow_methods=['*'], allow_headers=['*'], allow_credentials=True, expose_headers=EXPOSED_HEADERS), Middleware(PrometheusMiddleware, filter_unhandled_paths=True)] routes = [Route('/sse/hub-cache', endpoint=create_hub_cache_endpoint(hub_cache_watcher=hub_cache_watcher)), Route('/healthcheck', endpoint=healthcheck_endpoint), Route('/sse/healthcheck', endpoint=healthcheck_endpoint), 
Route('/sse/metrics', endpoint=create_metrics_endpoint())] return Starlette(routes=routes, middleware=middleware, on_startup=[hub_cache_watcher.start_watching], on_shutdown=[hub_cache_watcher.stop_watching]) def start() -> None: uvicorn_config = UvicornConfig.from_env() uvicorn.run('app:create_app', host=uvicorn_config.hostname, port=uvicorn_config.port, factory=True, workers=uvicorn_config.num_workers) # File: dataset-viewer-main/services/sse-api/src/sse_api/config.py from dataclasses import dataclass, field from libapi.config import ApiConfig from libcommon.config import CacheConfig, CommonConfig, LogConfig, QueueConfig @dataclass(frozen=True) class AppConfig: api: ApiConfig = field(default_factory=ApiConfig) cache: CacheConfig = field(default_factory=CacheConfig) common: CommonConfig = field(default_factory=CommonConfig) log: LogConfig = field(default_factory=LogConfig) queue: QueueConfig = field(default_factory=QueueConfig) @classmethod def from_env(cls) -> 'AppConfig': common_config = CommonConfig.from_env() return cls(common=common_config, cache=CacheConfig.from_env(), log=LogConfig.from_env(), queue=QueueConfig.from_env(), api=ApiConfig.from_env(hf_endpoint=common_config.hf_endpoint)) # File: dataset-viewer-main/services/sse-api/src/sse_api/routes/hub_cache.py import dataclasses import json import logging from asyncio import CancelledError from collections.abc import AsyncGenerator, AsyncIterable from libapi.request import get_request_parameter from libapi.utils import Endpoint from sse_starlette import EventSourceResponse, ServerSentEvent from starlette.requests import Request from starlette.responses import Response from sse_api.watcher import HubCacheWatcher def create_hub_cache_endpoint(hub_cache_watcher: HubCacheWatcher) -> Endpoint: async def hub_cache_endpoint(request: Request) -> Response: logging.info('/hub-cache') all = get_request_parameter(request, 'all', default='false').lower() == 'true' (uuid, event) = hub_cache_watcher.subscribe() if all: init_task = hub_cache_watcher.run_initialization(uuid) async def event_generator() -> AsyncGenerator[ServerSentEvent, None]: try: while True: new_value = await event.wait_value() event.clear() if new_value is not None: logging.debug(f'Sending new value: {new_value}') yield ServerSentEvent(data=json.dumps(dataclasses.asdict(new_value)), event='message') finally: hub_cache_watcher.unsubscribe(uuid) if all: await init_task return EventSourceResponse(error_handling(event_generator()), media_type='text/event-stream') return hub_cache_endpoint async def error_handling(sse_generator: AsyncGenerator[ServerSentEvent, None]) -> AsyncIterable[ServerSentEvent]: try: async for event in sse_generator: yield event except CancelledError: yield ServerSentEvent('Connection closed', event='error') raise except Exception: yield ServerSentEvent('Internal server error', event='error') raise # File: dataset-viewer-main/services/sse-api/src/sse_api/watcher.py import asyncio import contextlib from collections.abc import Mapping, Sequence from dataclasses import dataclass from http import HTTPStatus from typing import Any, Optional from uuid import uuid4 from motor.motor_asyncio import AsyncIOMotorClient from pymongo.errors import PyMongoError from sse_api.constants import HUB_CACHE_KIND DatasetHubCacheResponse = Mapping[str, Any] class ChangeStreamInitError(Exception): pass @dataclass class HubCacheChangedEventValue: dataset: str hub_cache: Optional[DatasetHubCacheResponse] class HubCacheChangedEvent(asyncio.Event): _hub_cache_value: 
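# An illustrative client for the /sse/hub-cache endpoint above, parsing the Server-Sent
# Events stream by hand (the base URL is a placeholder; each 'data:' line carries the JSON
# of a HubCacheChangedEventValue):
import json
import requests

SSE_API = 'http://localhost:8085'  # assumption: wherever the sse-api service is exposed

# all=true first replays the existing hub-cache entries, then streams subsequent changes
with requests.get(f'{SSE_API}/sse/hub-cache', params={'all': 'true'}, stream=True) as response:
    response.raise_for_status()
    for line in response.iter_lines(decode_unicode=True):
        if line and line.startswith('data:'):
            payload = line[len('data:'):].strip()
            try:
                event = json.loads(payload)  # {'dataset': ..., 'hub_cache': {...} or None}
            except json.JSONDecodeError:
                continue  # 'error' events carry plain text ('Connection closed', ...)
            print(event['dataset'], 'has hub cache' if event['hub_cache'] else 'no hub cache')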
Optional[HubCacheChangedEventValue] def __init__(self, *, hub_cache_value: Optional[HubCacheChangedEventValue]=None): super().__init__() self._hub_cache_value = hub_cache_value super().set() def set_value(self, *, hub_cache_value: Optional[HubCacheChangedEventValue]=None) -> None: self._hub_cache_value = hub_cache_value return super().set() async def wait_value(self) -> Optional[HubCacheChangedEventValue]: await super().wait() return self._hub_cache_value @dataclass class HubCachePublisher: _watchers: dict[str, HubCacheChangedEvent] def _notify_change(self, *, dataset: str, hub_cache: Optional[DatasetHubCacheResponse], suscriber: Optional[str]=None) -> None: hub_cache_value = HubCacheChangedEventValue(dataset=dataset, hub_cache=hub_cache) for (watcher, event) in self._watchers.items(): if suscriber is None or suscriber == watcher: event.set_value(hub_cache_value=hub_cache_value) def _unsubscribe(self, uuid: str) -> None: self._watchers.pop(uuid) def _subscribe(self) -> tuple[str, HubCacheChangedEvent]: event = HubCacheChangedEvent() uuid = uuid4().hex self._watchers[uuid] = event return (uuid, event) class HubCacheWatcher: _watch_task: asyncio.Task[None] def __init__(self, client: AsyncIOMotorClient, db_name: str, collection_name: str) -> None: self._client = client self._collection = self._client[db_name][collection_name] self._publisher = HubCachePublisher(_watchers={}) def run_initialization(self, suscriber: str) -> asyncio.Task[Any]: return asyncio.create_task(self._init_loop(suscriber=suscriber)) def start_watching(self) -> None: self._watch_task = asyncio.create_task(self._watch_loop()) async def stop_watching(self) -> None: self._watch_task.cancel() with contextlib.suppress(asyncio.CancelledError): await self._watch_task def subscribe(self) -> tuple[str, HubCacheChangedEvent]: return self._publisher._subscribe() def unsubscribe(self, uuid: str) -> None: pub = self._publisher pub._unsubscribe(uuid) async def _init_loop(self, suscriber: str) -> None: async for document in self._collection.find(filter={'kind': HUB_CACHE_KIND}, projection={'dataset': 1, 'content': 1, 'http_status': 1}, sort=[('_id', 1)], batch_size=1): dataset = document['dataset'] self._publisher._notify_change(suscriber=suscriber, dataset=dataset, hub_cache=document['content'] if document['http_status'] == HTTPStatus.OK else None) async def _watch_loop(self) -> None: pipeline: Sequence[Mapping[str, Any]] = [{'$match': {'$or': [{'fullDocument.kind': HUB_CACHE_KIND}, {'fullDocumentBeforeChange.kind': HUB_CACHE_KIND}], 'operationType': {'$in': ['insert', 'update', 'replace', 'delete']}}}, {'$project': {'fullDocument': 1, 'fullDocumentBeforeChange': 1, 'updateDescription': 1, 'operationType': 1}}] resume_token = None while True: try: async with self._collection.watch(pipeline, resume_after=resume_token, full_document='updateLookup', full_document_before_change='whenAvailable') as stream: async for change in stream: resume_token = stream.resume_token operation = change['operationType'] if operation == 'delete' and 'fullDocumentBeforeChange' in change and (change['fullDocumentBeforeChange']['kind'] == HUB_CACHE_KIND): dataset = change['fullDocumentBeforeChange']['dataset'] self._publisher._notify_change(dataset=dataset, hub_cache=None) continue if change['fullDocument']['kind'] != HUB_CACHE_KIND: continue if operation == 'update' and (not any((field in change['updateDescription']['updatedFields'] for field in ['content', 'http_status']))): continue self._publisher._notify_change(dataset=change['fullDocument']['dataset'], 
hub_cache=change['fullDocument']['content'] if change['fullDocument']['http_status'] == HTTPStatus.OK else None) except PyMongoError: if resume_token is None: raise ChangeStreamInitError() # File: dataset-viewer-main/services/webhook/src/webhook/app.py import uvicorn from libapi.config import UvicornConfig from libapi.routes.healthcheck import healthcheck_endpoint from libapi.routes.metrics import create_metrics_endpoint from libapi.utils import EXPOSED_HEADERS from libcommon.cloudfront import get_cloudfront_signer from libcommon.log import init_logging from libcommon.resources import CacheMongoResource, QueueMongoResource, Resource from libcommon.storage_client import StorageClient from libcommon.url_preparator import URLPreparator from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.cors import CORSMiddleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route from starlette_prometheus import PrometheusMiddleware from webhook.config import AppConfig from webhook.routes.webhook import create_webhook_endpoint def create_app() -> Starlette: app_config = AppConfig.from_env() return create_app_with_config(app_config=app_config) def create_app_with_config(app_config: AppConfig) -> Starlette: init_logging(level=app_config.log.level) middleware = [Middleware(CORSMiddleware, allow_origins=['*'], allow_methods=['*'], allow_headers=['*'], allow_credentials=True, expose_headers=EXPOSED_HEADERS), Middleware(GZipMiddleware), Middleware(PrometheusMiddleware, filter_unhandled_paths=True)] cached_assets_storage_client = StorageClient(protocol=app_config.cached_assets.storage_protocol, storage_root=app_config.cached_assets.storage_root, base_url=app_config.cached_assets.base_url, s3_config=app_config.s3) url_signer = get_cloudfront_signer(cloudfront_config=app_config.cloudfront) url_preparator = URLPreparator(url_signer=url_signer) assets_storage_client = StorageClient(protocol=app_config.assets.storage_protocol, storage_root=app_config.assets.storage_root, base_url=app_config.assets.base_url, s3_config=app_config.s3, url_preparator=url_preparator) storage_clients = [cached_assets_storage_client, assets_storage_client] cache_resource = CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) queue_resource = QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) resources: list[Resource] = [cache_resource, queue_resource] if not cache_resource.is_available(): raise RuntimeError('The connection to the cache database could not be established. Exiting.') if not queue_resource.is_available(): raise RuntimeError('The connection to the queue database could not be established. 
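# An illustrative sketch of the resume-token pattern _watch_loop above relies on, against a
# hypothetical local MongoDB (change streams require a replica set; the URI, database and
# collection names are placeholders):
import asyncio
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo.errors import PyMongoError

async def watch_example_collection() -> None:
    client = AsyncIOMotorClient('mongodb://localhost:27017')
    collection = client['example_db']['example_collection']
    resume_token = None
    while True:
        try:
            async with collection.watch(
                [{'$match': {'operationType': {'$in': ['insert', 'update', 'replace', 'delete']}}}],
                resume_after=resume_token,
                full_document='updateLookup',
            ) as stream:
                async for change in stream:
                    # remember the position so a dropped cursor can resume instead of replaying
                    resume_token = stream.resume_token
                    print(change['operationType'])
        except PyMongoError:
            if resume_token is None:
                raise  # the stream never started, nothing to resume from; give up

# asyncio.run(watch_example_collection())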
Exiting.') routes = [Route('/healthcheck', endpoint=healthcheck_endpoint), Route('/metrics', endpoint=create_metrics_endpoint()), Route('/webhook', endpoint=create_webhook_endpoint(hf_webhook_secret=app_config.api.hf_webhook_secret, blocked_datasets=app_config.common.blocked_datasets, hf_endpoint=app_config.common.hf_endpoint, hf_token=app_config.common.hf_token, hf_timeout_seconds=app_config.api.hf_timeout_seconds, storage_clients=storage_clients), methods=['POST'])] return Starlette(routes=routes, middleware=middleware, on_shutdown=[resource.release for resource in resources]) def start() -> None: uvicorn_config = UvicornConfig.from_env() uvicorn.run('app:create_app', host=uvicorn_config.hostname, port=uvicorn_config.port, factory=True, workers=uvicorn_config.num_workers) # File: dataset-viewer-main/services/webhook/src/webhook/config.py from dataclasses import dataclass, field from libapi.config import ApiConfig from libcommon.config import AssetsConfig, CacheConfig, CachedAssetsConfig, CloudFrontConfig, CommonConfig, LogConfig, QueueConfig, S3Config @dataclass(frozen=True) class AppConfig: api: ApiConfig = field(default_factory=ApiConfig) assets: AssetsConfig = field(default_factory=AssetsConfig) cache: CacheConfig = field(default_factory=CacheConfig) cached_assets: CachedAssetsConfig = field(default_factory=CachedAssetsConfig) cloudfront: CloudFrontConfig = field(default_factory=CloudFrontConfig) common: CommonConfig = field(default_factory=CommonConfig) log: LogConfig = field(default_factory=LogConfig) queue: QueueConfig = field(default_factory=QueueConfig) s3: S3Config = field(default_factory=S3Config) @classmethod def from_env(cls) -> 'AppConfig': common_config = CommonConfig.from_env() return cls(common=common_config, assets=AssetsConfig.from_env(), cache=CacheConfig.from_env(), cached_assets=CachedAssetsConfig.from_env(), cloudfront=CloudFrontConfig.from_env(), log=LogConfig.from_env(), queue=QueueConfig.from_env(), api=ApiConfig.from_env(hf_endpoint=common_config.hf_endpoint), s3=S3Config.from_env()) # File: dataset-viewer-main/services/webhook/src/webhook/routes/webhook.py import logging from typing import Any, Literal, Optional, TypedDict from jsonschema import ValidationError, validate from libapi.utils import Endpoint, get_response from libcommon.dtos import Priority from libcommon.exceptions import CustomError from libcommon.operations import delete_dataset, get_current_revision, smart_update_dataset, update_dataset from libcommon.prometheus import StepProfiler from libcommon.storage_client import StorageClient from starlette.requests import Request from starlette.responses import Response schema = {'$schema': 'https://json-schema.org/draft/2020-12/schema', 'type': 'object', 'properties': {'event': {'type': 'string', 'enum': ['add', 'remove', 'update', 'move']}, 'movedTo': {'type': 'string'}, 'repo': {'type': 'object', 'properties': {'headSha': {'type': 'string'}, 'name': {'type': 'string'}, 'type': {'type': 'string', 'enum': ['dataset', 'model', 'space']}}, 'required': ['type', 'name']}, 'scope': {'type': 'string'}}, 'required': ['event', 'repo', 'scope']} class _MoonWebhookV2PayloadRepo(TypedDict): type: Literal['model', 'dataset', 'space'] name: str class MoonWebhookV2PayloadRepo(_MoonWebhookV2PayloadRepo, total=False): headSha: Optional[str] class UpdatedRefDict(TypedDict): ref: str oldSha: Optional[str] newSha: Optional[str] class MoonWebhookV2Payload(TypedDict): event: Literal['add', 'remove', 'update', 'move'] movedTo: Optional[str] repo: MoonWebhookV2PayloadRepo 
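# An illustrative sketch of the uvicorn "factory" pattern the start() helpers above share
# (the example_app module path is hypothetical):
import uvicorn
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from starlette.routing import Route

def create_app() -> Starlette:
    async def healthcheck(request):
        return JSONResponse({'status': 'ok'})
    return Starlette(routes=[Route('/healthcheck', endpoint=healthcheck)])

if __name__ == '__main__':
    # factory=True makes uvicorn import the string and call it, so each worker process
    # builds its own application instance
    uvicorn.run('example_app:create_app', host='localhost', port=8000, factory=True, workers=2)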
scope: str updatedRefs: Optional[list[UpdatedRefDict]] def parse_payload(json: Any) -> MoonWebhookV2Payload: validate(instance=json, schema=schema) return json def process_payload(payload: MoonWebhookV2Payload, blocked_datasets: list[str], hf_endpoint: str, hf_token: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, storage_clients: Optional[list[StorageClient]]=None) -> None: if payload['repo']['type'] != 'dataset' or payload['scope'] not in ('repo', 'repo.content', 'repo.config'): return None dataset = payload['repo']['name'] if dataset is None: return None event = payload['event'] if event == 'remove': delete_dataset(dataset=dataset, storage_clients=storage_clients) elif event in ['add', 'update', 'move']: if event == 'update' and get_current_revision(dataset) == payload['repo']['headSha'] and (not payload['scope'] == 'repo.config'): logging.warning(f'Webhook revision for {dataset} is the same as the current revision in the db - skipping update.') return None revision = payload['repo'].get('headSha') old_revision: Optional[str] = None for updated_ref in payload.get('updatedRefs') or []: ref = updated_ref.get('ref') ref_new_sha = updated_ref.get('newSha') ref_old_sha = updated_ref.get('oldSha') if ref == 'refs/heads/main' and isinstance(ref_new_sha, str) and isinstance(ref_old_sha, str): if revision != ref_new_sha: logging.warning(f'Unexpected headSha {revision} is different from newSha {ref_new_sha}. Processing webhook payload anyway.') revision = ref_new_sha old_revision = ref_old_sha new_dataset = event == 'move' and payload['movedTo'] or dataset if event == 'update' and revision and old_revision: try: smart_update_dataset(dataset=new_dataset, revision=revision, old_revision=old_revision, blocked_datasets=blocked_datasets, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds, storage_clients=storage_clients) return None except Exception as err: logging.error(f'smart_update_dataset failed with {type(err).__name__}: {err}') delete_dataset(dataset=dataset, storage_clients=storage_clients) update_dataset(dataset=new_dataset, priority=Priority.NORMAL, blocked_datasets=blocked_datasets, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds, storage_clients=storage_clients) return None def create_webhook_endpoint(blocked_datasets: list[str], hf_endpoint: str, hf_token: Optional[str]=None, hf_timeout_seconds: Optional[float]=None, hf_webhook_secret: Optional[str]=None, storage_clients: Optional[list[StorageClient]]=None) -> Endpoint: async def webhook_endpoint(request: Request) -> Response: with StepProfiler(method='webhook_endpoint', step='all'): with StepProfiler(method='webhook_endpoint', step='get JSON'): try: json = await request.json() except Exception: content = {'status': 'error', 'error': 'the body could not be parsed as a JSON'} logging.info('/webhook: the body could not be parsed as a JSON.') return get_response(content, 400) logging.info(f'/webhook: {json}') with StepProfiler(method='webhook_endpoint', step='parse payload and headers'): try: payload = parse_payload(json) except ValidationError as e: content = {'status': 'error', 'error': 'the JSON payload is invalid'} logging.info(f'/webhook: the JSON body is invalid. JSON: {json}. Error: {e}') return get_response(content, 400) except Exception as e: logging.exception('Unexpected error', exc_info=e) content = {'status': 'error', 'error': 'unexpected error'} logging.warning(f'/webhook: unexpected error while parsing the JSON body is invalid. 
Error: {e}') return get_response(content, 500) HEADER = 'x-webhook-secret' trust_sender = hf_webhook_secret is not None and (secret := request.headers.get(HEADER)) is not None and (secret == hf_webhook_secret) if not trust_sender: logging.info(f'/webhook: the sender is not trusted. JSON: {json}') return get_response({'status': 'error', 'error': 'The sender is not trusted. Retry with a valid secret.'}, 400) with StepProfiler(method='webhook_endpoint', step='process payload'): try: process_payload(payload=payload, blocked_datasets=blocked_datasets, hf_endpoint=hf_endpoint, hf_token=hf_token, hf_timeout_seconds=hf_timeout_seconds, storage_clients=storage_clients) except CustomError as e: content = {'status': 'error', 'error': 'the dataset is not supported'} dataset = payload['repo']['name'] logging.debug(f'/webhook: the dataset {dataset} is not supported. JSON: {json}. Error: {e}') return get_response(content, 400) content = {'status': 'ok'} return get_response(content, 200) return webhook_endpoint # File: dataset-viewer-main/services/worker/src/worker/config.py from dataclasses import dataclass, field from typing import Optional from environs import Env from libcommon.config import AssetsConfig, CacheConfig, CommonConfig, LogConfig, ParquetMetadataConfig, QueueConfig, RowsIndexConfig, S3Config WORKER_UVICORN_HOSTNAME = 'localhost' WORKER_UVICORN_NUM_WORKERS = 2 WORKER_UVICORN_PORT = 8000 @dataclass(frozen=True) class UvicornConfig: hostname: str = WORKER_UVICORN_HOSTNAME num_workers: int = WORKER_UVICORN_NUM_WORKERS port: int = WORKER_UVICORN_PORT @classmethod def from_env(cls) -> 'UvicornConfig': env = Env(expand_vars=True) with env.prefixed('WORKER_UVICORN_'): return cls(hostname=env.str(name='HOSTNAME', default=WORKER_UVICORN_HOSTNAME), num_workers=env.int(name='NUM_WORKERS', default=WORKER_UVICORN_NUM_WORKERS), port=env.int(name='PORT', default=WORKER_UVICORN_PORT)) WORKER_CONTENT_MAX_BYTES = 10000000 WORKER_DIFFICULTY_MAX = None WORKER_DIFFICULTY_MIN = None WORKER_HEARTBEAT_INTERVAL_SECONDS = 60 WORKER_KILL_LONG_JOB_INTERVAL_SECONDS = 60 WORKER_KILL_ZOMBIES_INTERVAL_SECONDS = 10 * 60 WORKER_MAX_JOB_DURATION_SECONDS = 20 * 60 WORKER_MAX_LOAD_PCT = 70 WORKER_MAX_MEMORY_PCT = 80 WORKER_MAX_MISSING_HEARTBEATS = 5 WORKER_SLEEP_SECONDS = 15 WORKER_STATE_FILE_PATH = None def get_empty_str_list() -> list[str]: return [] @dataclass(frozen=True) class WorkerConfig: content_max_bytes: int = WORKER_CONTENT_MAX_BYTES difficulty_max: Optional[int] = WORKER_DIFFICULTY_MAX difficulty_min: Optional[int] = WORKER_DIFFICULTY_MIN heartbeat_interval_seconds: float = WORKER_HEARTBEAT_INTERVAL_SECONDS kill_long_job_interval_seconds: float = WORKER_KILL_LONG_JOB_INTERVAL_SECONDS kill_zombies_interval_seconds: float = WORKER_KILL_ZOMBIES_INTERVAL_SECONDS max_job_duration_seconds: float = WORKER_MAX_JOB_DURATION_SECONDS max_load_pct: int = WORKER_MAX_LOAD_PCT max_memory_pct: int = WORKER_MAX_MEMORY_PCT max_missing_heartbeats: int = WORKER_MAX_MISSING_HEARTBEATS sleep_seconds: float = WORKER_SLEEP_SECONDS state_file_path: Optional[str] = WORKER_STATE_FILE_PATH @classmethod def from_env(cls) -> 'WorkerConfig': env = Env(expand_vars=True) with env.prefixed('WORKER_'): return cls(content_max_bytes=env.int(name='CONTENT_MAX_BYTES', default=WORKER_CONTENT_MAX_BYTES), difficulty_max=env.int(name='DIFFICULTY_MAX', default=WORKER_DIFFICULTY_MAX), difficulty_min=env.int(name='DIFFICULTY_MIN', default=WORKER_DIFFICULTY_MIN), heartbeat_interval_seconds=env.float(name='HEARTBEAT_INTERVAL_SECONDS', 
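# An illustrative webhook call matching the JSON schema and secret check above (the URL,
# secret and commit SHAs are placeholders; the secret must equal the hf_webhook_secret the
# service was configured with, otherwise the endpoint answers 400):
import requests

WEBHOOK_API = 'http://localhost:8087'  # assumption: wherever the webhook service is exposed
WEBHOOK_SECRET = 'replace-me'

payload = {
    'event': 'update',
    'scope': 'repo.content',
    'repo': {'type': 'dataset', 'name': 'user/some_dataset', 'headSha': 'abc123'},
    'updatedRefs': [{'ref': 'refs/heads/main', 'oldSha': 'def456', 'newSha': 'abc123'}],
}

response = requests.post(
    f'{WEBHOOK_API}/webhook',
    json=payload,
    headers={'x-webhook-secret': WEBHOOK_SECRET},
    timeout=10,
)
print(response.status_code, response.json())  # 200 {'status': 'ok'} when the payload is accepted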
default=WORKER_HEARTBEAT_INTERVAL_SECONDS), kill_long_job_interval_seconds=env.float(name='KILL_LONG_JOB_INTERVAL_SECONDS', default=WORKER_KILL_LONG_JOB_INTERVAL_SECONDS), kill_zombies_interval_seconds=env.float(name='KILL_ZOMBIES_INTERVAL_SECONDS', default=WORKER_KILL_ZOMBIES_INTERVAL_SECONDS), max_job_duration_seconds=env.float(name='MAX_JOB_DURATION_SECONDS', default=WORKER_MAX_JOB_DURATION_SECONDS), max_load_pct=env.int(name='MAX_LOAD_PCT', default=WORKER_MAX_LOAD_PCT), max_memory_pct=env.int(name='MAX_MEMORY_PCT', default=WORKER_MAX_MEMORY_PCT), max_missing_heartbeats=env.int(name='MAX_MISSING_HEARTBEATS', default=WORKER_MAX_MISSING_HEARTBEATS), sleep_seconds=env.float(name='SLEEP_SECONDS', default=WORKER_SLEEP_SECONDS), state_file_path=env.str(name='STATE_FILE_PATH', default=WORKER_STATE_FILE_PATH)) DATASETS_BASED_HF_DATASETS_CACHE = None @dataclass(frozen=True) class DatasetsBasedConfig: hf_datasets_cache: Optional[str] = DATASETS_BASED_HF_DATASETS_CACHE @classmethod def from_env(cls) -> 'DatasetsBasedConfig': env = Env(expand_vars=True) with env.prefixed('DATASETS_BASED_'): return cls(hf_datasets_cache=env.str(name='HF_DATASETS_CACHE', default=DATASETS_BASED_HF_DATASETS_CACHE)) FIRST_ROWS_MIN_CELL_BYTES = 100 FIRST_ROWS_COLUMNS_MAX_NUMBER = 1000 FIRST_ROWS_MAX_BYTES = 1000000 FIRST_ROWS_MIN_NUMBER = 10 @dataclass(frozen=True) class FirstRowsConfig: columns_max_number: int = FIRST_ROWS_COLUMNS_MAX_NUMBER max_bytes: int = FIRST_ROWS_MAX_BYTES min_cell_bytes: int = FIRST_ROWS_MIN_CELL_BYTES min_number: int = FIRST_ROWS_MIN_NUMBER @classmethod def from_env(cls) -> 'FirstRowsConfig': env = Env(expand_vars=True) with env.prefixed('FIRST_ROWS_'): return cls(columns_max_number=env.int(name='COLUMNS_MAX_NUMBER', default=FIRST_ROWS_COLUMNS_MAX_NUMBER), max_bytes=env.int(name='MAX_BYTES', default=FIRST_ROWS_MAX_BYTES), min_cell_bytes=env.int(name='MIN_CELL_BYTES', default=FIRST_ROWS_MIN_CELL_BYTES), min_number=env.int(name='MIN_NUMBER', default=FIRST_ROWS_MIN_NUMBER)) OPT_IN_OUT_URLS_SCAN_COLUMNS_MAX_NUMBER = 10 OPT_IN_OUT_URLS_SCAN_MAX_CONCURRENT_REQUESTS_NUMBER = 100 OPT_IN_OUT_URLS_SCAN_MAX_REQUESTS_PER_SECOND = 50 OPT_IN_OUT_URLS_SCAN_ROWS_MAX_NUMBER = 100000 OPT_IN_OUT_URLS_SCAN_SPAWNING_TOKEN = None OPT_IN_OUT_URLS_SCAN_SPAWNING_URL = 'https://opts-api.spawningaiapi.com/api/v2/query/urls' OPT_IN_OUT_URLS_SCAN_URLS_NUMBER_PER_BATCH = 1000 @dataclass(frozen=True) class OptInOutUrlsScanConfig: columns_max_number: int = FIRST_ROWS_COLUMNS_MAX_NUMBER max_concurrent_requests_number: int = OPT_IN_OUT_URLS_SCAN_MAX_CONCURRENT_REQUESTS_NUMBER max_requests_per_second: int = OPT_IN_OUT_URLS_SCAN_MAX_REQUESTS_PER_SECOND rows_max_number: int = OPT_IN_OUT_URLS_SCAN_ROWS_MAX_NUMBER spawning_token: Optional[str] = OPT_IN_OUT_URLS_SCAN_SPAWNING_TOKEN spawning_url: str = OPT_IN_OUT_URLS_SCAN_SPAWNING_URL urls_number_per_batch: int = OPT_IN_OUT_URLS_SCAN_URLS_NUMBER_PER_BATCH @classmethod def from_env(cls) -> 'OptInOutUrlsScanConfig': env = Env(expand_vars=True) with env.prefixed('OPT_IN_OUT_URLS_SCAN_'): return cls(columns_max_number=env.int(name='COLUMNS_MAX_NUMBER', default=OPT_IN_OUT_URLS_SCAN_COLUMNS_MAX_NUMBER), max_concurrent_requests_number=env.int(name='MAX_CONCURRENT_REQUESTS_NUMBER', default=OPT_IN_OUT_URLS_SCAN_MAX_CONCURRENT_REQUESTS_NUMBER), max_requests_per_second=env.int(name='MAX_REQUESTS_PER_SECOND', default=OPT_IN_OUT_URLS_SCAN_MAX_REQUESTS_PER_SECOND), rows_max_number=env.int(name='ROWS_MAX_NUMBER', default=OPT_IN_OUT_URLS_SCAN_ROWS_MAX_NUMBER), 
spawning_token=env.str(name='SPAWNING_TOKEN', default=OPT_IN_OUT_URLS_SCAN_SPAWNING_TOKEN), spawning_url=env.str(name='SPAWNING_URL', default=OPT_IN_OUT_URLS_SCAN_SPAWNING_URL), urls_number_per_batch=env.int(name='URLS_NUMBER_PER_BATCH', default=OPT_IN_OUT_URLS_SCAN_URLS_NUMBER_PER_BATCH)) PRESIDIO_ENTITIES_SCAN_COLUMNS_MAX_NUMBER = 10 PRESIDIO_ENTITIES_SCAN_MAX_TEXT_LENGTH = 1000 PRESIDIO_ENTITIES_SCAN_ROWS_MAX_NUMBER = 10000 @dataclass(frozen=True) class PresidioEntitiesScanConfig: columns_max_number: int = PRESIDIO_ENTITIES_SCAN_COLUMNS_MAX_NUMBER max_text_length: int = PRESIDIO_ENTITIES_SCAN_MAX_TEXT_LENGTH rows_max_number: int = PRESIDIO_ENTITIES_SCAN_ROWS_MAX_NUMBER @classmethod def from_env(cls) -> 'PresidioEntitiesScanConfig': env = Env(expand_vars=True) with env.prefixed('PRESIDIO_ENTITIES_SCAN_'): return cls(columns_max_number=env.int(name='COLUMNS_MAX_NUMBER', default=PRESIDIO_ENTITIES_SCAN_COLUMNS_MAX_NUMBER), max_text_length=env.int(name='MAX_TEXT_LENGTH', default=PRESIDIO_ENTITIES_SCAN_MAX_TEXT_LENGTH), rows_max_number=env.int(name='ROWS_MAX_NUMBER', default=PRESIDIO_ENTITIES_SCAN_ROWS_MAX_NUMBER)) PARQUET_AND_INFO_COMMIT_MESSAGE = 'Update parquet files' PARQUET_AND_INFO_COMMITTER_HF_TOKEN = None PARQUET_AND_INFO_MAX_DATASET_SIZE_BYTES = 100000000 PARQUET_AND_INFO_MAX_ROW_GROUP_BYTE_SIZE_FOR_COPY = 100000000 PARQUET_AND_INFO_SOURCE_REVISION = 'main' PARQUET_AND_INFO_TARGET_REVISION = 'refs/convert/parquet' PARQUET_AND_INFO_URL_TEMPLATE = '/datasets/%s/resolve/%s/%s' @dataclass(frozen=True) class ParquetAndInfoConfig: commit_message: str = PARQUET_AND_INFO_COMMIT_MESSAGE committer_hf_token: Optional[str] = PARQUET_AND_INFO_COMMITTER_HF_TOKEN max_dataset_size_bytes: int = PARQUET_AND_INFO_MAX_DATASET_SIZE_BYTES max_row_group_byte_size_for_copy: int = PARQUET_AND_INFO_MAX_ROW_GROUP_BYTE_SIZE_FOR_COPY source_revision: str = PARQUET_AND_INFO_SOURCE_REVISION target_revision: str = PARQUET_AND_INFO_TARGET_REVISION url_template: str = PARQUET_AND_INFO_URL_TEMPLATE @classmethod def from_env(cls) -> 'ParquetAndInfoConfig': env = Env(expand_vars=True) with env.prefixed('PARQUET_AND_INFO_'): return cls(commit_message=env.str(name='COMMIT_MESSAGE', default=PARQUET_AND_INFO_COMMIT_MESSAGE), committer_hf_token=env.str(name='COMMITTER_HF_TOKEN', default=PARQUET_AND_INFO_COMMITTER_HF_TOKEN), max_dataset_size_bytes=env.int(name='MAX_DATASET_SIZE_BYTES', default=PARQUET_AND_INFO_MAX_DATASET_SIZE_BYTES), max_row_group_byte_size_for_copy=env.int(name='MAX_ROW_GROUP_BYTE_SIZE_FOR_COPY', default=PARQUET_AND_INFO_MAX_ROW_GROUP_BYTE_SIZE_FOR_COPY), source_revision=env.str(name='SOURCE_REVISION', default=PARQUET_AND_INFO_SOURCE_REVISION), target_revision=env.str(name='TARGET_REVISION', default=PARQUET_AND_INFO_TARGET_REVISION), url_template=env.str(name='URL_TEMPLATE', default=PARQUET_AND_INFO_URL_TEMPLATE)) NUMBA_CACHE_DIR: Optional[str] = None @dataclass(frozen=True) class NumbaConfig: path: Optional[str] = NUMBA_CACHE_DIR @classmethod def from_env(cls) -> 'NumbaConfig': env = Env(expand_vars=True) with env.prefixed('NUMBA_'): return cls(path=env.str(name='CACHE_DIR', default=NUMBA_CACHE_DIR)) CONFIG_NAMES_MAX_NUMBER = 3000 @dataclass(frozen=True) class ConfigNamesConfig: max_number: int = CONFIG_NAMES_MAX_NUMBER @classmethod def from_env(cls) -> 'ConfigNamesConfig': env = Env(expand_vars=True) with env.prefixed('CONFIG_NAMES_'): return cls(max_number=env.int(name='MAX_NUMBER', default=CONFIG_NAMES_MAX_NUMBER)) SPLIT_NAMES_MAX_NUMBER = 30 @dataclass(frozen=True) class SplitNamesConfig: max_number: 
int = SPLIT_NAMES_MAX_NUMBER @classmethod def from_env(cls) -> 'SplitNamesConfig': env = Env(expand_vars=True) with env.prefixed('SPLIT_NAMES_'): return cls(max_number=env.int(name='MAX_NUMBER', default=SPLIT_NAMES_MAX_NUMBER)) DUCKDB_INDEX_CACHE_DIRECTORY = None DUCKDB_INDEX_COMMIT_MESSAGE = 'Update duckdb index file' DUCKDB_INDEX_COMMITTER_HF_TOKEN = None DUCKDB_INDEX_MAX_SPLIT_SIZE_BYTES = 100000000 DUCKDB_INDEX_TARGET_REVISION = 'refs/convert/duckdb' DUCKDB_INDEX_URL_TEMPLATE = '/datasets/%s/resolve/%s/%s' DUCKDB_INDEX_EXTENSIONS_DIRECTORY: Optional[str] = None @dataclass(frozen=True) class DuckDbIndexConfig: cache_directory: Optional[str] = DUCKDB_INDEX_CACHE_DIRECTORY commit_message: str = DUCKDB_INDEX_COMMIT_MESSAGE committer_hf_token: Optional[str] = DUCKDB_INDEX_COMMITTER_HF_TOKEN target_revision: str = DUCKDB_INDEX_TARGET_REVISION url_template: str = DUCKDB_INDEX_URL_TEMPLATE max_split_size_bytes: int = DUCKDB_INDEX_MAX_SPLIT_SIZE_BYTES extensions_directory: Optional[str] = DUCKDB_INDEX_EXTENSIONS_DIRECTORY @classmethod def from_env(cls) -> 'DuckDbIndexConfig': env = Env(expand_vars=True) with env.prefixed('DUCKDB_INDEX_'): return cls(cache_directory=env.str(name='CACHE_DIRECTORY', default=DUCKDB_INDEX_CACHE_DIRECTORY), commit_message=env.str(name='COMMIT_MESSAGE', default=DUCKDB_INDEX_COMMIT_MESSAGE), committer_hf_token=env.str(name='COMMITTER_HF_TOKEN', default=DUCKDB_INDEX_COMMITTER_HF_TOKEN), target_revision=env.str(name='TARGET_REVISION', default=DUCKDB_INDEX_TARGET_REVISION), url_template=env.str(name='URL_TEMPLATE', default=DUCKDB_INDEX_URL_TEMPLATE), max_split_size_bytes=env.int(name='MAX_SPLIT_SIZE_BYTES', default=DUCKDB_INDEX_MAX_SPLIT_SIZE_BYTES), extensions_directory=env.str(name='EXTENSIONS_DIRECTORY', default=DUCKDB_INDEX_EXTENSIONS_DIRECTORY)) DESCRIPTIVE_STATISTICS_CACHE_DIRECTORY = None DESCRIPTIVE_STATISTICS_MAX_SPLIT_SIZE_BYTES = 100000000 @dataclass(frozen=True) class DescriptiveStatisticsConfig: cache_directory: Optional[str] = DESCRIPTIVE_STATISTICS_CACHE_DIRECTORY parquet_revision: str = PARQUET_AND_INFO_TARGET_REVISION max_split_size_bytes: int = DESCRIPTIVE_STATISTICS_MAX_SPLIT_SIZE_BYTES @classmethod def from_env(cls) -> 'DescriptiveStatisticsConfig': env = Env(expand_vars=True) parquet_revision = env.str(name='PARQUET_AND_INFO_TARGET_REVISION', default=PARQUET_AND_INFO_TARGET_REVISION) with env.prefixed('DESCRIPTIVE_STATISTICS_'): return cls(cache_directory=env.str(name='CACHE_DIRECTORY', default=DESCRIPTIVE_STATISTICS_CACHE_DIRECTORY), parquet_revision=parquet_revision, max_split_size_bytes=env.int(name='MAX_SPLIT_SIZE_BYTES', default=DESCRIPTIVE_STATISTICS_MAX_SPLIT_SIZE_BYTES)) @dataclass(frozen=True) class AppConfig: assets: AssetsConfig = field(default_factory=AssetsConfig) cache: CacheConfig = field(default_factory=CacheConfig) common: CommonConfig = field(default_factory=CommonConfig) config_names: ConfigNamesConfig = field(default_factory=ConfigNamesConfig) datasets_based: DatasetsBasedConfig = field(default_factory=DatasetsBasedConfig) first_rows: FirstRowsConfig = field(default_factory=FirstRowsConfig) log: LogConfig = field(default_factory=LogConfig) numba: NumbaConfig = field(default_factory=NumbaConfig) parquet_and_info: ParquetAndInfoConfig = field(default_factory=ParquetAndInfoConfig) queue: QueueConfig = field(default_factory=QueueConfig) rows_index: RowsIndexConfig = field(default_factory=RowsIndexConfig) s3: S3Config = field(default_factory=S3Config) split_names: SplitNamesConfig = field(default_factory=SplitNamesConfig) worker: 
WorkerConfig = field(default_factory=WorkerConfig) urls_scan: OptInOutUrlsScanConfig = field(default_factory=OptInOutUrlsScanConfig) presidio_scan: PresidioEntitiesScanConfig = field(default_factory=PresidioEntitiesScanConfig) parquet_metadata: ParquetMetadataConfig = field(default_factory=ParquetMetadataConfig) duckdb_index: DuckDbIndexConfig = field(default_factory=DuckDbIndexConfig) descriptive_statistics: DescriptiveStatisticsConfig = field(default_factory=DescriptiveStatisticsConfig) @classmethod def from_env(cls) -> 'AppConfig': return cls(assets=AssetsConfig.from_env(), common=CommonConfig.from_env(), config_names=ConfigNamesConfig.from_env(), cache=CacheConfig.from_env(), datasets_based=DatasetsBasedConfig.from_env(), first_rows=FirstRowsConfig.from_env(), log=LogConfig.from_env(), numba=NumbaConfig.from_env(), parquet_and_info=ParquetAndInfoConfig.from_env(), queue=QueueConfig.from_env(), s3=S3Config.from_env(), split_names=SplitNamesConfig.from_env(), worker=WorkerConfig.from_env(), urls_scan=OptInOutUrlsScanConfig.from_env(), presidio_scan=PresidioEntitiesScanConfig.from_env(), parquet_metadata=ParquetMetadataConfig.from_env(), duckdb_index=DuckDbIndexConfig.from_env(), descriptive_statistics=DescriptiveStatisticsConfig.from_env(), rows_index=RowsIndexConfig.from_env()) # File: dataset-viewer-main/services/worker/src/worker/dtos.py from collections.abc import Mapping from dataclasses import dataclass, field from typing import Any, Literal, Optional, TypedDict, Union from libcommon.dtos import FullConfigItem, FullSplitItem, SplitHubFile, SplitItem class JobRunnerInfo(TypedDict): job_type: str job_runner_version: int @dataclass class JobResult: content: Mapping[str, Any] progress: float def __post_init__(self) -> None: if self.progress < 0.0 or self.progress > 1.0: raise ValueError(f'Progress should be between 0 and 1, but got {self.progress}') @dataclass class CompleteJobResult(JobResult): content: Mapping[str, Any] progress: float = field(init=False, default=1.0) class SplitsList(TypedDict): splits: list[FullSplitItem] class FailedConfigItem(FullConfigItem): error: Mapping[str, Any] class DatasetSplitNamesResponse(TypedDict): splits: list[FullSplitItem] pending: list[FullConfigItem] failed: list[FailedConfigItem] class PreviousJob(TypedDict): dataset: str config: Optional[str] split: Optional[Union[str, None]] kind: str class OptUrl(TypedDict): url: str row_idx: int column_name: str class OptInOutUrlsCountResponse(TypedDict): urls_columns: list[str] num_opt_in_urls: int num_opt_out_urls: int num_urls: int num_scanned_rows: int has_urls_columns: bool full_scan: Union[bool, None] class OptInOutUrlsScanResponse(OptInOutUrlsCountResponse): opt_in_urls: list[OptUrl] opt_out_urls: list[OptUrl] class PresidioEntity(TypedDict): text: str type: str row_idx: int column_name: str class PresidioAllEntitiesCountResponse(TypedDict): scanned_columns: list[str] num_in_vehicle_registration_entities: int num_organization_entities: int num_sg_nric_fin_entities: int num_person_entities: int num_credit_card_entities: int num_medical_license_entities: int num_nrp_entities: int num_us_ssn_entities: int num_crypto_entities: int num_date_time_entities: int num_location_entities: int num_us_driver_license_entities: int num_phone_number_entities: int num_url_entities: int num_us_passport_entities: int num_age_entities: int num_au_acn_entities: int num_email_address_entities: int num_in_pan_entities: int num_ip_address_entities: int num_id_entities: int num_us_bank_number_entities: int 
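# Editorial comment (not in the source): each `num_<entity>_entities` field of PresidioAllEntitiesCountResponse counts the detected occurrences of one Presidio entity type, while the matching `num_rows_with_<entity>_entities` fields further below count the scanned rows in which that entity type was found.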
num_in_aadhaar_entities: int num_us_itin_entities: int num_au_medicare_entities: int num_iban_code_entities: int num_au_tfn_entities: int num_uk_nhs_entities: int num_email_entities: int num_au_abn_entities: int num_rows_with_in_vehicle_registration_entities: int num_rows_with_organization_entities: int num_rows_with_sg_nric_fin_entities: int num_rows_with_person_entities: int num_rows_with_credit_card_entities: int num_rows_with_medical_license_entities: int num_rows_with_nrp_entities: int num_rows_with_us_ssn_entities: int num_rows_with_crypto_entities: int num_rows_with_date_time_entities: int num_rows_with_location_entities: int num_rows_with_us_driver_license_entities: int num_rows_with_phone_number_entities: int num_rows_with_url_entities: int num_rows_with_us_passport_entities: int num_rows_with_age_entities: int num_rows_with_au_acn_entities: int num_rows_with_email_address_entities: int num_rows_with_in_pan_entities: int num_rows_with_ip_address_entities: int num_rows_with_id_entities: int num_rows_with_us_bank_number_entities: int num_rows_with_in_aadhaar_entities: int num_rows_with_us_itin_entities: int num_rows_with_au_medicare_entities: int num_rows_with_iban_code_entities: int num_rows_with_au_tfn_entities: int num_rows_with_uk_nhs_entities: int num_rows_with_email_entities: int num_rows_with_au_abn_entities: int num_scanned_rows: int has_scanned_columns: bool full_scan: Union[bool, None] class PresidioEntitiesScanResponse(PresidioAllEntitiesCountResponse): entities: list[PresidioEntity] class PresidioEntitiesCountResponse(TypedDict): scanned_columns: list[str] num_rows_with_person_entities: int num_rows_with_phone_number_entities: int num_rows_with_email_address_entities: int num_rows_with_sensitive_pii: int num_scanned_rows: int has_scanned_columns: bool full_scan: Union[bool, None] class ImageUrlColumnsResponse(TypedDict): columns: list[str] class ConfigInfoResponse(TypedDict): dataset_info: dict[str, Any] partial: bool class ConfigParquetAndInfoResponse(TypedDict): parquet_files: list[SplitHubFile] dataset_info: dict[str, Any] estimated_dataset_info: Optional[dict[str, Any]] partial: bool class ParquetFileMetadataItem(SplitItem): url: str filename: str size: int num_rows: int parquet_metadata_subpath: str class ConfigParquetMetadataResponse(TypedDict): parquet_files_metadata: list[ParquetFileMetadataItem] features: Optional[dict[str, Any]] partial: bool class ConfigParquetResponse(TypedDict): parquet_files: list[SplitHubFile] features: Optional[dict[str, Any]] partial: bool class ConfigSize(TypedDict): dataset: str config: str num_bytes_original_files: Optional[int] num_bytes_parquet_files: int num_bytes_memory: int num_rows: int num_columns: int estimated_num_rows: Optional[int] class SplitSize(TypedDict): dataset: str config: str split: str num_bytes_parquet_files: int num_bytes_memory: int num_rows: int num_columns: int estimated_num_rows: Optional[int] class ConfigSizeContent(TypedDict): config: ConfigSize splits: list[SplitSize] class ConfigSizeResponse(TypedDict): size: ConfigSizeContent partial: bool class SplitDuckdbIndex(SplitHubFile): features: Optional[dict[str, Any]] partial: Optional[bool] num_rows: Optional[int] num_bytes: Optional[int] duckdb_version: str stemmer: Optional[str] class SplitDuckdbIndexSize(TypedDict): dataset: str config: str split: str has_fts: bool num_rows: int num_bytes: int class ConfigDuckdbIndexSize(TypedDict): dataset: str config: str has_fts: bool num_rows: int num_bytes: int class ConfigDuckdbIndexSizeContent(TypedDict): config: 
ConfigDuckdbIndexSize splits: list[SplitDuckdbIndexSize] class ConfigDuckdbIndexSizeResponse(TypedDict): size: ConfigDuckdbIndexSizeContent partial: bool class DatasetDuckdbIndexSize(TypedDict): dataset: str has_fts: bool num_rows: int num_bytes: int class DatasetDuckdbIndexSizeContent(TypedDict): dataset: DatasetDuckdbIndexSize configs: list[ConfigDuckdbIndexSize] splits: list[SplitDuckdbIndexSize] class DatasetDuckdbIndexSizeResponse(TypedDict): size: DatasetDuckdbIndexSizeContent pending: list[PreviousJob] failed: list[PreviousJob] partial: bool class ConfigNameItem(TypedDict): dataset: str config: str class DatasetConfigNamesResponse(TypedDict): config_names: list[ConfigNameItem] class DatasetInfoResponse(TypedDict): dataset_info: dict[str, Any] pending: list[PreviousJob] failed: list[PreviousJob] partial: bool class IsValidResponse(TypedDict): preview: bool viewer: bool search: bool filter: bool statistics: bool DatasetLibrary = Literal['mlcroissant', 'webdataset', 'datasets', 'pandas', 'dask', 'polars'] DatasetFormat = Literal['json', 'csv', 'parquet', 'imagefolder', 'audiofolder', 'webdataset', 'text', 'arrow'] ProgrammingLanguage = Literal['python'] class LoadingCode(TypedDict): config_name: str arguments: dict[str, Any] code: str class CompatibleLibrary(TypedDict): language: ProgrammingLanguage library: DatasetLibrary function: str loading_codes: list[LoadingCode] class DatasetCompatibleLibrariesResponse(TypedDict): libraries: list[CompatibleLibrary] formats: list[DatasetFormat] DatasetModality = Literal['image', 'audio', 'text', 'video', 'geospatial', '3d', 'tabular', 'timeseries'] class DatasetModalitiesResponse(TypedDict): modalities: list[DatasetModality] class DatasetHubCacheResponse(TypedDict): preview: bool viewer: bool partial: bool num_rows: Optional[int] libraries: list[DatasetLibrary] modalities: list[DatasetModality] formats: list[DatasetFormat] class _Filetype(TypedDict): extension: str count: int class Filetype(_Filetype, total=False): archived_in: str compressed_in: str class DatasetFiletypesResponse(TypedDict): filetypes: list[Filetype] class DatasetParquetResponse(TypedDict): parquet_files: list[SplitHubFile] pending: list[PreviousJob] failed: list[PreviousJob] partial: bool class DatasetSize(TypedDict): dataset: str num_bytes_original_files: Optional[int] num_bytes_parquet_files: int num_bytes_memory: int num_rows: int estimated_num_rows: Optional[int] class DatasetSizeContent(TypedDict): dataset: DatasetSize configs: list[ConfigSize] splits: list[SplitSize] class DatasetSizeResponse(TypedDict): size: DatasetSizeContent pending: list[PreviousJob] failed: list[PreviousJob] partial: bool # File: dataset-viewer-main/services/worker/src/worker/executor.py import asyncio import logging import os import signal import sys from collections.abc import Callable from datetime import datetime, timedelta from random import random from typing import Any, Optional, Union import orjson from filelock import FileLock from libcommon.queue.jobs import Queue from libcommon.utils import get_datetime, get_duration from mirakuru import OutputExecutor, ProcessExitedWithError, TCPExecutor from worker import start_web_app, start_worker_loop from worker.config import AppConfig, UvicornConfig from worker.job_manager import JobManager from worker.job_runner_factory import JobRunnerFactory from worker.loop import WorkerState START_WORKER_LOOP_PATH = start_worker_loop.__file__ START_WEB_APP_PATH = start_web_app.__file__ async def every(func: Callable[..., Optional[Any]], *args: Any, seconds: 
Union[float, tuple[float, float]], stop_on: Optional[Any]=None, **kwargs: Any) -> None: while True: out = func(*args, **kwargs) if stop_on is not None and out == stop_on: break delay = seconds[0] + (seconds[1] - seconds[0]) * random() if isinstance(seconds, tuple) else seconds await asyncio.sleep(delay) class BadWorkerState(RuntimeError): pass class WorkerExecutor: def __init__(self, app_config: AppConfig, job_runner_factory: JobRunnerFactory, state_file_path: str) -> None: self.app_config = app_config self.job_runner_factory = job_runner_factory self.state_file_path = state_file_path max_missing_heartbeats = self.app_config.worker.max_missing_heartbeats heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds self.max_seconds_without_heartbeat_for_zombies = heartbeat_interval_seconds * max_missing_heartbeats self.heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds self.max_job_duration_seconds = self.app_config.worker.max_job_duration_seconds self.kill_zombies_interval_seconds = self.app_config.worker.kill_zombies_interval_seconds self.kill_long_job_interval_seconds = self.app_config.worker.kill_long_job_interval_seconds self.executors: list[Union[OutputExecutor, TCPExecutor]] = [] def _create_worker_loop_executor(self) -> OutputExecutor: banner = self.state_file_path start_worker_loop_command = [sys.executable, START_WORKER_LOOP_PATH, '--print-worker-state-path'] return OutputExecutor(start_worker_loop_command, banner, timeout=10) def _create_web_app_executor(self) -> TCPExecutor: logging.info('Starting webapp for /healthcheck and /metrics.') start_web_app_command = [sys.executable, START_WEB_APP_PATH] uvicorn_config = UvicornConfig.from_env() return TCPExecutor(start_web_app_command, host=uvicorn_config.hostname, port=uvicorn_config.port, timeout=10) def start(self) -> None: worker_loop_executor = self._create_worker_loop_executor() worker_loop_executor.start() self.executors.append(worker_loop_executor) web_app_executor = self._create_web_app_executor() web_app_executor.start() self.executors.append(web_app_executor) loop = asyncio.get_event_loop() loop.add_signal_handler(signal.SIGTERM, self.sigterm_stop) logging.info('Starting heartbeat.') loop.create_task(every(self.heartbeat, seconds=self.heartbeat_interval_seconds)) loop.create_task(every(self.kill_zombies, seconds=(self.kill_zombies_interval_seconds * 0.5, self.kill_zombies_interval_seconds * 1.5))) loop.create_task(every(self.kill_long_job, worker_loop_executor=worker_loop_executor, seconds=(self.kill_long_job_interval_seconds * 0.5, self.kill_long_job_interval_seconds * 1.5))) loop.run_until_complete(every(self.is_worker_alive, worker_loop_executor=worker_loop_executor, seconds=1.0, stop_on=False)) logging.info('Executor loop finished.') def sigterm_stop(self) -> None: logging.error('Executor received SIGTERM') self.stop() def stop(self) -> None: for executor in self.executors: executor.stop() def get_state(self) -> Optional[WorkerState]: worker_state_file_path = self.state_file_path if not os.path.exists(worker_state_file_path): return None with FileLock(f'{worker_state_file_path}.lock'): try: with open(worker_state_file_path, 'rb') as worker_state_f: worker_state = orjson.loads(worker_state_f.read()) return WorkerState(current_job_info=worker_state.get('current_job_info'), last_updated=datetime.fromisoformat(worker_state['last_updated'])) except (orjson.JSONDecodeError, KeyError) as err: raise BadWorkerState(f'Failed to read worker state at {worker_state_file_path}') from err 
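# Editorial sketch (not part of the source): the `every` helper defined above drives the executor's
# periodic tasks created in `start()`. When `seconds` is a (min, max) tuple, the delay between calls
# is drawn uniformly from that range, which is how kill_zombies and kill_long_job get jittered
# schedules (0.5x to 1.5x of their configured interval), while the heartbeat and the liveness check
# run at a fixed interval. A minimal illustration of the delay computation, assuming a 60-second interval:
#
#     from random import random
#     seconds = (60 * 0.5, 60 * 1.5)
#     delay = seconds[0] + (seconds[1] - seconds[0]) * random()  # uniform in [30.0, 90.0)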
def heartbeat(self) -> None: worker_state = self.get_state() if worker_state and worker_state['current_job_info']: job_id = worker_state['current_job_info']['job_id'] try: Queue().heartbeat(job_id=job_id) except Exception as error: logging.warning(f'Heartbeat failed for job {job_id}: {error}') self.stop() def kill_zombies(self) -> None: queue = Queue() zombies = queue.get_zombies(max_seconds_without_heartbeat=self.max_seconds_without_heartbeat_for_zombies) message = 'Job manager crashed while running this job (missing heartbeats).' for zombie in zombies: job_runner = self.job_runner_factory.create_job_runner(zombie) job_manager = JobManager(job_info=zombie, app_config=self.app_config, job_runner=job_runner) job_manager.set_crashed(message=message) logging.info(f'Killing zombie. Job info = {zombie}') def kill_long_job(self, worker_loop_executor: OutputExecutor) -> None: worker_state = self.get_state() if worker_state and worker_state['current_job_info']: long_job = worker_state['current_job_info'] last_updated = worker_state['last_updated'] coefficient = 10 if long_job['params']['dataset'] == 'cerebras/SlimPajama-627B' else 1 if last_updated + timedelta(seconds=coefficient * self.max_job_duration_seconds) <= get_datetime(): _duration_seconds = int(get_duration(last_updated)) logging.warning(f'Job {long_job} exceeded maximum duration of {self.max_job_duration_seconds} seconds ({_duration_seconds} seconds).') try: worker_loop_executor.stop() finally: logging.info(f'Killing a long job. Job info = {long_job}') job_runner = self.job_runner_factory.create_job_runner(long_job) job_manager = JobManager(job_info=long_job, app_config=self.app_config, job_runner=job_runner) message = 'Job manager was killed while running this job (job exceeded maximum duration).' 
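# Editorial comment (not in the source): at this step the worker loop that was running the long job has
# been asked to stop in the try/finally above, and the job is then reported as failed with a
# JobManagerExceededMaximumDurationError via set_exceeded_maximum_duration (defined further below in
# JobManager) rather than being left in the started state. Note the hard-coded 10x duration allowance
# applied above when the dataset is 'cerebras/SlimPajama-627B'.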
job_manager.set_exceeded_maximum_duration(message=message) def is_worker_alive(self, worker_loop_executor: OutputExecutor) -> bool: if worker_loop_executor.running(): return True try: worker_loop_executor.stop() except ProcessExitedWithError as err: explanation = f'exit code {err.exit_code}' if err.exit_code == -9: explanation += ' SIGKILL - surely an OOM' error_msg = f'Worker crashed ({explanation})' state = self.get_state() if state and state['current_job_info']: error_msg += f" when running job_id={state['current_job_info']['job_id']}" logging.error(error_msg) raise except BaseException as err: explanation = f'{type(err).__name__}: {err}' error_msg = f'Worker crashed ({explanation})' state = self.get_state() if state and state['current_job_info']: error_msg += f" when running job_id={state['current_job_info']['job_id']}" logging.error(error_msg) raise if worker_loop_executor.process: return_code = worker_loop_executor.process.returncode if return_code is not None and return_code != 0: explanation = f'return code {return_code}' if return_code == -9: explanation += ' SIGKILL - surely an OOM' error_msg = f'Worker crashed ({explanation})' state = self.get_state() if state and state['current_job_info']: error_msg += f" when running job_id={state['current_job_info']['job_id']}" logging.error(error_msg) raise return False # File: dataset-viewer-main/services/worker/src/worker/job_manager.py import logging from http import HTTPStatus from typing import Optional from libcommon.config import CommonConfig from libcommon.dtos import JobInfo, JobParams, JobResult, Priority from libcommon.exceptions import CustomError, DatasetNotFoundError, JobManagerCrashedError, JobManagerExceededMaximumDurationError, PreviousStepStillProcessingError, TooBigContentError, UnexpectedError from libcommon.orchestrator import finish_job from libcommon.processing_graph import processing_graph from libcommon.simple_cache import CachedArtifactError, CachedArtifactNotFoundError from libcommon.utils import get_duration_or_none, orjson_dumps from worker.config import AppConfig, WorkerConfig from worker.job_runner import JobRunner class JobManager: job_id: str job_params: JobParams priority: Priority worker_config: WorkerConfig common_config: CommonConfig job_runner: JobRunner job_runner_version: int def __init__(self, job_info: JobInfo, app_config: AppConfig, job_runner: JobRunner) -> None: self.job_info = job_info self.job_type = job_info['type'] self.job_id = job_info['job_id'] self.priority = job_info['priority'] self.job_params = job_info['params'] self.common_config = app_config.common self.worker_config = app_config.worker self.job_runner = job_runner self.job_runner_version = processing_graph.get_processing_step_by_job_type(self.job_type).job_runner_version self.setup() def setup(self) -> None: job_type = self.job_runner.get_job_type() if self.job_type != job_type: raise ValueError(f'The submitted job type is {self.job_type}, but the job manager only processes {job_type}') def __str__(self) -> str: return f"JobManager(job_id={self.job_id} dataset={self.job_params['dataset']} job_info={self.job_info}" def log(self, level: int, msg: str) -> None: logging.log(level=level, msg=f'[{self.job_type}] {msg}') def debug(self, msg: str) -> None: self.log(level=logging.DEBUG, msg=msg) def info(self, msg: str) -> None: self.log(level=logging.INFO, msg=msg) def warning(self, msg: str) -> None: self.log(level=logging.WARNING, msg=msg) def exception(self, msg: str) -> None: self.log(level=logging.ERROR, msg=msg) def critical(self, 
msg: str) -> None: self.log(level=logging.CRITICAL, msg=msg) def run_job(self) -> JobResult: try: self.job_runner.validate() job_result: JobResult = self.process() except Exception: job_result = {'job_info': self.job_info, 'job_runner_version': self.job_runner_version, 'is_success': False, 'output': None, 'duration': get_duration_or_none(self.job_info['started_at'])} result_str = 'SUCCESS' if job_result['is_success'] else 'ERROR' self.debug(f'job output with {result_str} - {self}') return job_result def finish(self, job_result: JobResult) -> None: finish_job(job_result=job_result) def process(self) -> JobResult: self.info(f'compute {self}') started_at = self.job_info['started_at'] try: try: self.job_runner.pre_compute() job_result = self.job_runner.compute() content = job_result.content if len(orjson_dumps(content)) > self.worker_config.content_max_bytes: raise TooBigContentError(f'The computed response content exceeds the supported size in bytes ({self.worker_config.content_max_bytes}).') except CachedArtifactNotFoundError as err: raise PreviousStepStillProcessingError(message='The previous steps are still being processed', cause=err) finally: self.job_runner.post_compute() self.debug(f"dataset={self.job_params['dataset']} revision={self.job_params['revision']} job_info={self.job_info} is valid") return {'job_info': self.job_info, 'job_runner_version': self.job_runner_version, 'is_success': True, 'output': {'content': content, 'http_status': HTTPStatus.OK, 'error_code': None, 'details': None, 'progress': job_result.progress}, 'duration': get_duration_or_none(started_at)} except DatasetNotFoundError: self.debug(f"the dataset={self.job_params['dataset']} could not be found, don't update the cache") return {'job_info': self.job_info, 'job_runner_version': self.job_runner_version, 'is_success': False, 'output': None, 'duration': get_duration_or_none(started_at)} except CachedArtifactError as err: self.debug(f'response for job_info={self.job_info} had an error from a previous step') return {'job_info': self.job_info, 'job_runner_version': self.job_runner_version, 'is_success': False, 'output': {'content': err.cache_entry_with_details['content'], 'http_status': err.cache_entry_with_details['http_status'], 'error_code': err.cache_entry_with_details['error_code'], 'details': err.enhanced_details, 'progress': None}, 'duration': get_duration_or_none(started_at)} except Exception as err: e = err if isinstance(err, CustomError) else UnexpectedError(str(err), err) self.debug(f'response for job_info={self.job_info} had an error') return {'job_info': self.job_info, 'job_runner_version': self.job_runner_version, 'is_success': False, 'output': {'content': dict(e.as_response()), 'http_status': e.status_code, 'error_code': e.code, 'details': dict(e.as_response_with_cause()), 'progress': None}, 'duration': get_duration_or_none(started_at)} def set_crashed(self, message: str, cause: Optional[BaseException]=None) -> None: self.info(f"response for dataset={self.job_params['dataset']} revision={self.job_params['revision']} job_info={self.job_info} had an error (crashed)") error = JobManagerCrashedError(message=message, cause=cause) self.finish(job_result={'job_info': self.job_info, 'job_runner_version': self.job_runner_version, 'is_success': False, 'output': {'content': dict(error.as_response()), 'http_status': error.status_code, 'error_code': error.code, 'details': dict(error.as_response_with_cause()), 'progress': None}, 'duration': get_duration_or_none(self.job_info['started_at'])}) def 
set_exceeded_maximum_duration(self, message: str, cause: Optional[BaseException]=None) -> None: self.info(f"response for dataset={self.job_params['dataset']} revision={self.job_params['revision']} job_info={self.job_info} had an error (exceeded maximum duration)") error = JobManagerExceededMaximumDurationError(message=message, cause=cause) self.finish(job_result={'job_info': self.job_info, 'job_runner_version': self.job_runner_version, 'is_success': False, 'output': {'content': dict(error.as_response()), 'http_status': error.status_code, 'error_code': error.code, 'details': dict(error.as_response_with_cause()), 'progress': None}, 'duration': get_duration_or_none(self.job_info['started_at'])}) # File: dataset-viewer-main/services/worker/src/worker/job_runner.py from abc import ABC, abstractmethod from libcommon.dtos import JobInfo from worker.config import AppConfig from worker.dtos import JobResult class JobRunner(ABC): job_info: JobInfo app_config: AppConfig @staticmethod @abstractmethod def get_job_type() -> str: pass def __init__(self, job_info: JobInfo, app_config: AppConfig) -> None: self.job_info = job_info self.app_config = app_config def pre_compute(self) -> None: pass @abstractmethod def compute(self) -> JobResult: pass def post_compute(self) -> None: pass def validate(self) -> None: pass # File: dataset-viewer-main/services/worker/src/worker/job_runner_factory.py from abc import ABC, abstractmethod from dataclasses import dataclass from pathlib import Path from libcommon.dtos import JobInfo from libcommon.storage import StrPath from libcommon.storage_client import StorageClient from worker.config import AppConfig from worker.job_runner import JobRunner from worker.job_runners.config.duckdb_index_size import ConfigDuckdbIndexSizeJobRunner from worker.job_runners.config.info import ConfigInfoJobRunner from worker.job_runners.config.is_valid import ConfigIsValidJobRunner from worker.job_runners.config.opt_in_out_urls_count import ConfigOptInOutUrlsCountJobRunner from worker.job_runners.config.parquet import ConfigParquetJobRunner from worker.job_runners.config.parquet_and_info import ConfigParquetAndInfoJobRunner from worker.job_runners.config.parquet_metadata import ConfigParquetMetadataJobRunner from worker.job_runners.config.size import ConfigSizeJobRunner from worker.job_runners.config.split_names import ConfigSplitNamesJobRunner from worker.job_runners.dataset.compatible_libraries import DatasetCompatibleLibrariesJobRunner from worker.job_runners.dataset.config_names import DatasetConfigNamesJobRunner from worker.job_runners.dataset.croissant_crumbs import DatasetCroissantCrumbsJobRunner from worker.job_runners.dataset.duckdb_index_size import DatasetDuckdbIndexSizeJobRunner from worker.job_runners.dataset.filetypes import DatasetFiletypesJobRunner from worker.job_runners.dataset.hub_cache import DatasetHubCacheJobRunner from worker.job_runners.dataset.info import DatasetInfoJobRunner from worker.job_runners.dataset.is_valid import DatasetIsValidJobRunner from worker.job_runners.dataset.modalities import DatasetModalitiesJobRunner from worker.job_runners.dataset.opt_in_out_urls_count import DatasetOptInOutUrlsCountJobRunner from worker.job_runners.dataset.parquet import DatasetParquetJobRunner from worker.job_runners.dataset.presidio_entities_count import DatasetPresidioEntitiesCountJobRunner from worker.job_runners.dataset.size import DatasetSizeJobRunner from worker.job_runners.dataset.split_names import DatasetSplitNamesJobRunner from 
worker.job_runners.split.descriptive_statistics import SplitDescriptiveStatisticsJobRunner from worker.job_runners.split.duckdb_index import SplitDuckDbIndexJobRunner from worker.job_runners.split.first_rows import SplitFirstRowsJobRunner from worker.job_runners.split.image_url_columns import SplitImageUrlColumnsJobRunner from worker.job_runners.split.is_valid import SplitIsValidJobRunner from worker.job_runners.split.opt_in_out_urls_count import SplitOptInOutUrlsCountJobRunner from worker.job_runners.split.opt_in_out_urls_scan_from_streaming import SplitOptInOutUrlsScanJobRunner from worker.job_runners.split.presidio_scan import SplitPresidioEntitiesScanJobRunner class BaseJobRunnerFactory(ABC): def create_job_runner(self, job_info: JobInfo) -> JobRunner: return self._create_job_runner(job_info=job_info) @abstractmethod def _create_job_runner(self, job_info: JobInfo) -> JobRunner: pass @dataclass class JobRunnerFactory(BaseJobRunnerFactory): app_config: AppConfig hf_datasets_cache: Path parquet_metadata_directory: StrPath duckdb_index_cache_directory: StrPath statistics_cache_directory: StrPath storage_client: StorageClient def _create_job_runner(self, job_info: JobInfo) -> JobRunner: job_type = job_info['type'] if job_type == DatasetConfigNamesJobRunner.get_job_type(): return DatasetConfigNamesJobRunner(job_info=job_info, app_config=self.app_config, hf_datasets_cache=self.hf_datasets_cache) if job_type == DatasetFiletypesJobRunner.get_job_type(): return DatasetFiletypesJobRunner(job_info=job_info, app_config=self.app_config, hf_datasets_cache=self.hf_datasets_cache) if job_type == ConfigSplitNamesJobRunner.get_job_type(): return ConfigSplitNamesJobRunner(job_info=job_info, app_config=self.app_config, hf_datasets_cache=self.hf_datasets_cache) if job_type == SplitFirstRowsJobRunner.get_job_type(): return SplitFirstRowsJobRunner(job_info=job_info, app_config=self.app_config, hf_datasets_cache=self.hf_datasets_cache, parquet_metadata_directory=self.parquet_metadata_directory, storage_client=self.storage_client) if job_type == ConfigParquetAndInfoJobRunner.get_job_type(): return ConfigParquetAndInfoJobRunner(job_info=job_info, app_config=self.app_config, hf_datasets_cache=self.hf_datasets_cache) if job_type == ConfigParquetJobRunner.get_job_type(): return ConfigParquetJobRunner(job_info=job_info, app_config=self.app_config) if job_type == ConfigParquetMetadataJobRunner.get_job_type(): return ConfigParquetMetadataJobRunner(job_info=job_info, app_config=self.app_config, parquet_metadata_directory=self.parquet_metadata_directory) if job_type == DatasetParquetJobRunner.get_job_type(): return DatasetParquetJobRunner(job_info=job_info, app_config=self.app_config) if job_type == DatasetInfoJobRunner.get_job_type(): return DatasetInfoJobRunner(job_info=job_info, app_config=self.app_config) if job_type == ConfigInfoJobRunner.get_job_type(): return ConfigInfoJobRunner(job_info=job_info, app_config=self.app_config) if job_type == DatasetSizeJobRunner.get_job_type(): return DatasetSizeJobRunner(job_info=job_info, app_config=self.app_config) if job_type == ConfigSizeJobRunner.get_job_type(): return ConfigSizeJobRunner(job_info=job_info, app_config=self.app_config) if job_type == DatasetSplitNamesJobRunner.get_job_type(): return DatasetSplitNamesJobRunner(job_info=job_info, app_config=self.app_config) if job_type == SplitIsValidJobRunner.get_job_type(): return SplitIsValidJobRunner(job_info=job_info, app_config=self.app_config) if job_type == ConfigIsValidJobRunner.get_job_type(): return 
ConfigIsValidJobRunner(job_info=job_info, app_config=self.app_config) if job_type == DatasetIsValidJobRunner.get_job_type(): return DatasetIsValidJobRunner(job_info=job_info, app_config=self.app_config) if job_type == SplitImageUrlColumnsJobRunner.get_job_type(): return SplitImageUrlColumnsJobRunner(job_info=job_info, app_config=self.app_config) if job_type == SplitOptInOutUrlsScanJobRunner.get_job_type(): return SplitOptInOutUrlsScanJobRunner(job_info=job_info, app_config=self.app_config, hf_datasets_cache=self.hf_datasets_cache) if job_type == ConfigOptInOutUrlsCountJobRunner.get_job_type(): return ConfigOptInOutUrlsCountJobRunner(job_info=job_info, app_config=self.app_config) if job_type == DatasetOptInOutUrlsCountJobRunner.get_job_type(): return DatasetOptInOutUrlsCountJobRunner(job_info=job_info, app_config=self.app_config) if job_type == SplitOptInOutUrlsCountJobRunner.get_job_type(): return SplitOptInOutUrlsCountJobRunner(job_info=job_info, app_config=self.app_config) if job_type == SplitPresidioEntitiesScanJobRunner.get_job_type(): return SplitPresidioEntitiesScanJobRunner(job_info=job_info, app_config=self.app_config, hf_datasets_cache=self.hf_datasets_cache) if job_type == DatasetPresidioEntitiesCountJobRunner.get_job_type(): return DatasetPresidioEntitiesCountJobRunner(job_info=job_info, app_config=self.app_config) if job_type == SplitDescriptiveStatisticsJobRunner.get_job_type(): return SplitDescriptiveStatisticsJobRunner(job_info=job_info, app_config=self.app_config, statistics_cache_directory=self.statistics_cache_directory, parquet_metadata_directory=self.parquet_metadata_directory) if job_type == SplitDuckDbIndexJobRunner.get_job_type(): return SplitDuckDbIndexJobRunner(job_info=job_info, app_config=self.app_config, duckdb_index_cache_directory=self.duckdb_index_cache_directory, parquet_metadata_directory=self.parquet_metadata_directory) if job_type == ConfigDuckdbIndexSizeJobRunner.get_job_type(): return ConfigDuckdbIndexSizeJobRunner(job_info=job_info, app_config=self.app_config) if job_type == DatasetDuckdbIndexSizeJobRunner.get_job_type(): return DatasetDuckdbIndexSizeJobRunner(job_info=job_info, app_config=self.app_config) if job_type == DatasetHubCacheJobRunner.get_job_type(): return DatasetHubCacheJobRunner(job_info=job_info, app_config=self.app_config) if job_type == DatasetCompatibleLibrariesJobRunner.get_job_type(): return DatasetCompatibleLibrariesJobRunner(job_info=job_info, app_config=self.app_config, hf_datasets_cache=self.hf_datasets_cache) if job_type == DatasetModalitiesJobRunner.get_job_type(): return DatasetModalitiesJobRunner(job_info=job_info, app_config=self.app_config) if job_type == DatasetCroissantCrumbsJobRunner.get_job_type(): return DatasetCroissantCrumbsJobRunner(job_info=job_info, app_config=self.app_config) raise KeyError(f"Unsupported job type: '{job_type}'.") # File: dataset-viewer-main/services/worker/src/worker/job_runners/_job_runner_with_cache.py import json import random import re from hashlib import sha1 from pathlib import Path from typing import Optional from libcommon.dtos import JobInfo from libcommon.exceptions import DiskError from libcommon.storage import init_dir, remove_dir from worker.config import AppConfig from worker.job_runner import JobRunner class JobRunnerWithCache(JobRunner): base_cache_directory: Path cache_subdirectory: Optional[Path] = None def __init__(self, job_info: JobInfo, app_config: AppConfig, cache_directory: Path) -> None: super().__init__(job_info=job_info, app_config=app_config) self.base_cache_directory 
= cache_directory def get_cache_subdirectory(self, digits: int=14) -> str: random_str = f'{random.randrange(10 ** (digits - 1), 10 ** digits)}' payload = (random_str, self.get_job_type(), self.job_info['params']['dataset'], self.job_info['params']['config'], self.job_info['params']['split']) hash_suffix = sha1(json.dumps(payload, sort_keys=True).encode(), usedforsecurity=False).hexdigest()[:8] prefix = f"{random_str}-{self.get_job_type()}-{self.job_info['params']['dataset']}"[:64] subdirectory = f'{prefix}-{hash_suffix}' return ''.join([c if re.match('[\\w-]', c) else '-' for c in subdirectory]) def pre_compute(self) -> None: new_directory = self.base_cache_directory / self.get_cache_subdirectory() try: self.cache_subdirectory = Path(init_dir(new_directory)) except PermissionError as e: raise DiskError(f'Incorrect permissions on {new_directory}', e) from e def post_compute(self) -> None: previous_cache = self.cache_subdirectory if previous_cache is not None: remove_dir(previous_cache) self.cache_subdirectory = None # File: dataset-viewer-main/services/worker/src/worker/job_runners/_job_runner_with_datasets_cache.py import logging from pathlib import Path from typing import Optional import datasets.config from libcommon.dtos import JobInfo from worker.config import AppConfig from worker.job_runners._job_runner_with_cache import JobRunnerWithCache class JobRunnerWithDatasetsCache(JobRunnerWithCache): def __init__(self, job_info: JobInfo, app_config: AppConfig, hf_datasets_cache: Path) -> None: super().__init__(job_info=job_info, app_config=app_config, cache_directory=hf_datasets_cache) def set_datasets_cache(self, cache_subdirectory: Optional[Path]) -> None: datasets.config.HF_DATASETS_CACHE = cache_subdirectory logging.debug(f'datasets data cache set to: {datasets.config.HF_DATASETS_CACHE}') datasets.config.DOWNLOADED_DATASETS_PATH = datasets.config.HF_DATASETS_CACHE / datasets.config.DOWNLOADED_DATASETS_DIR datasets.config.EXTRACTED_DATASETS_PATH = datasets.config.HF_DATASETS_CACHE / datasets.config.EXTRACTED_DATASETS_DIR def pre_compute(self) -> None: super().pre_compute() self.set_datasets_cache(self.cache_subdirectory) def post_compute(self) -> None: super().post_compute() self.set_datasets_cache(self.base_cache_directory) # File: dataset-viewer-main/services/worker/src/worker/job_runners/config/config_job_runner.py from pathlib import Path from libcommon.dtos import JobInfo from libcommon.exceptions import ParameterMissingError from worker.config import AppConfig from worker.job_runners._job_runner_with_datasets_cache import JobRunnerWithDatasetsCache from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner from worker.utils import check_config_exists class ConfigJobRunner(DatasetJobRunner): config: str def __init__(self, job_info: JobInfo, app_config: AppConfig) -> None: super().__init__(job_info=job_info, app_config=app_config) if job_info['params']['config'] is None: raise ParameterMissingError("'config' parameter is required") self.config = job_info['params']['config'] def validate(self) -> None: check_config_exists(dataset=self.dataset, config=self.config) class ConfigJobRunnerWithDatasetsCache(JobRunnerWithDatasetsCache, ConfigJobRunner): def __init__(self, job_info: JobInfo, app_config: AppConfig, hf_datasets_cache: Path) -> None: JobRunnerWithDatasetsCache.__init__(self=self, job_info=job_info, app_config=app_config, hf_datasets_cache=hf_datasets_cache) ConfigJobRunner.__init__(self=self, job_info=job_info, app_config=app_config) # File: 
dataset-viewer-main/services/worker/src/worker/job_runners/config/duckdb_index_size.py import logging from http import HTTPStatus from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_response from worker.dtos import CompleteJobResult, ConfigDuckdbIndexSize, ConfigDuckdbIndexSizeResponse, SplitDuckdbIndexSize from worker.job_runners.config.config_job_runner import ConfigJobRunner from worker.utils import get_split_names def compute_config_duckdb_index_size_response(dataset: str, config: str) -> ConfigDuckdbIndexSizeResponse: logging.info(f"compute 'config-duckdb-index-size' for dataset={dataset!r} config={config!r}") splits = get_split_names(dataset=dataset, config=config) try: total = 0 split_duckdb_index_sizes: list[SplitDuckdbIndexSize] = [] partial = False for split in splits: total += 1 try: duckdb_index_response = get_response(kind='split-duckdb-index', dataset=dataset, config=config, split=split) config_info_response = get_response(kind='config-info', dataset=dataset, config=config) except CachedArtifactNotFoundError: logging.debug("No response found in previous step for this dataset: 'split-duckdb-index' or 'config-info'.") continue if duckdb_index_response['http_status'] != HTTPStatus.OK: logging.debug(f"Previous step gave an error: {duckdb_index_response['http_status']}.") continue if config_info_response['http_status'] != HTTPStatus.OK: logging.debug(f"Previous step gave an error: {config_info_response['http_status']}.") continue split_duckdb_index = duckdb_index_response['content'] config_info = config_info_response['content'] if 'num_rows' in split_duckdb_index and isinstance(split_duckdb_index['num_rows'], int) and ('num_bytes' in split_duckdb_index) and isinstance(split_duckdb_index['num_bytes'], int): split_duckdb_index_sizes.append(SplitDuckdbIndexSize(dataset=dataset, config=config, split=split, has_fts=split_duckdb_index['stemmer'] is not None, num_rows=split_duckdb_index['num_rows'], num_bytes=split_duckdb_index['num_bytes'])) partial = partial or split_duckdb_index['partial'] else: split_info = config_info['dataset_info']['splits'][split] split_duckdb_index_sizes.append(SplitDuckdbIndexSize(dataset=dataset, config=config, split=split, has_fts=split_duckdb_index['stemmer'] is not None, num_rows=split_info['num_rows'], num_bytes=split_info['num_examples'])) partial = partial or config_info['partial'] except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e config_duckdb_index_size = ConfigDuckdbIndexSize(dataset=dataset, config=config, has_fts=any((split_duckdb_index_size['has_fts'] for split_duckdb_index_size in split_duckdb_index_sizes)), num_rows=sum((split_duckdb_index_size['num_rows'] for split_duckdb_index_size in split_duckdb_index_sizes)), num_bytes=sum((split_duckdb_index_size['num_bytes'] for split_duckdb_index_size in split_duckdb_index_sizes))) return ConfigDuckdbIndexSizeResponse({'size': {'config': config_duckdb_index_size, 'splits': split_duckdb_index_sizes}, 'partial': partial}) class ConfigDuckdbIndexSizeJobRunner(ConfigJobRunner): @staticmethod def get_job_type() -> str: return 'config-duckdb-index-size' def compute(self) -> CompleteJobResult: return CompleteJobResult(compute_config_duckdb_index_size_response(dataset=self.dataset, config=self.config)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/config/info.py import logging from libcommon.exceptions import PreviousStepFormatError from 
libcommon.simple_cache import get_previous_step_or_raise from worker.dtos import CompleteJobResult, ConfigInfoResponse from worker.job_runners.config.config_job_runner import ConfigJobRunner def compute_config_info_response(dataset: str, config: str) -> ConfigInfoResponse: logging.info(f"compute 'config-info' for dataset={dataset!r} and config={config!r}") previous_step = 'config-parquet-and-info' dataset_info_response = get_previous_step_or_raise(kind=previous_step, dataset=dataset, config=config) content = dataset_info_response['content'] try: config_info = content['dataset_info'] partial = content['partial'] except Exception as e: raise PreviousStepFormatError(f"Previous step '{previous_step}' did not return the expected content: 'dataset_info'.", e) from e if not isinstance(config_info, dict): raise PreviousStepFormatError('Previous step did not return the expected content.', TypeError(f'dataset_info should be a dict, but got {type(config_info)}')) return ConfigInfoResponse(dataset_info=config_info, partial=partial) class ConfigInfoJobRunner(ConfigJobRunner): @staticmethod def get_job_type() -> str: return 'config-info' def compute(self) -> CompleteJobResult: return CompleteJobResult(compute_config_info_response(dataset=self.dataset, config=self.config)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/config/is_valid.py import logging from http import HTTPStatus from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_response from worker.dtos import IsValidResponse, JobResult from worker.job_runners.config.config_job_runner import ConfigJobRunner from worker.utils import get_split_names def compute_is_valid_response(dataset: str, config: str) -> tuple[IsValidResponse, float]: logging.info(f"compute 'config-is-valid' response for dataset={dataset!r} config={config!r}") preview = False viewer = False search = False filter = False statistics = False try: split_names = get_split_names(dataset=dataset, config=config) except PreviousStepFormatError: raise except Exception: logging.debug("Erroneous response, or no response found, in previous step for this dataset: 'config-split-names'.") return (IsValidResponse(preview=preview, viewer=viewer, search=search, filter=filter, statistics=statistics), 0.0) try: total = 0 pending = 0 for split in split_names: total += 1 try: response = get_response(kind='split-is-valid', dataset=dataset, config=config, split=split) except CachedArtifactNotFoundError: logging.debug("No response found in previous step for this dataset: 'split-is-valid'.") pending += 1 continue if response['http_status'] != HTTPStatus.OK: logging.debug(f"Previous step gave an error: {response['http_status']}.") continue split_is_valid_content = response['content'] preview = preview or split_is_valid_content['preview'] viewer = viewer or split_is_valid_content['viewer'] search = search or split_is_valid_content['search'] filter = filter or split_is_valid_content['filter'] statistics = statistics or split_is_valid_content['statistics'] except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e progress = (total - pending) / total if total else 1.0 return (IsValidResponse(preview=preview, viewer=viewer, search=search, filter=filter, statistics=statistics), progress) class ConfigIsValidJobRunner(ConfigJobRunner): @staticmethod def get_job_type() -> str: return 'config-is-valid' def compute(self) -> JobResult: (response_content, progress) = 
compute_is_valid_response(dataset=self.dataset, config=self.config) return JobResult(response_content, progress=progress) # File: dataset-viewer-main/services/worker/src/worker/job_runners/config/opt_in_out_urls_count.py import logging from http import HTTPStatus from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_response from worker.dtos import JobResult, OptInOutUrlsCountResponse from worker.job_runners.config.config_job_runner import ConfigJobRunner from worker.utils import get_split_names def compute_opt_in_out_urls_count_response(dataset: str, config: str) -> tuple[OptInOutUrlsCountResponse, float]: logging.info(f"compute 'config-opt-in-out-urls-count' for dataset={dataset!r} config={config!r}") urls_columns = [] num_opt_in_urls = 0 num_opt_out_urls = 0 num_urls = 0 num_scanned_rows = 0 full_scan_count = 0 splits = get_split_names(dataset=dataset, config=config) try: total = 0 pending = 0 for split in splits: total += 1 try: response = get_response(kind='split-opt-in-out-urls-count', dataset=dataset, config=config, split=split) except CachedArtifactNotFoundError: logging.debug("No response found in previous step for this dataset: 'split-opt-in-out-urls-count'.") pending += 1 continue if response['http_status'] != HTTPStatus.OK: logging.debug(f"Previous step gave an error: {response['http_status']}.") continue split_opt_in_out_content = response['content'] urls_columns.extend(split_opt_in_out_content['urls_columns']) num_opt_in_urls += split_opt_in_out_content['num_opt_in_urls'] num_opt_out_urls += split_opt_in_out_content['num_opt_out_urls'] num_urls += split_opt_in_out_content['num_urls'] num_scanned_rows += split_opt_in_out_content['num_scanned_rows'] full_scan_count += 1 if split_opt_in_out_content['full_scan'] else 0 except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e unique_urls_columns = sorted(list(set(urls_columns))) has_urls_columns = len(unique_urls_columns) > 0 progress = (total - pending) / total if total else 1.0 full_scan = full_scan_count == total return (OptInOutUrlsCountResponse(urls_columns=unique_urls_columns, has_urls_columns=has_urls_columns, num_opt_in_urls=num_opt_in_urls, num_opt_out_urls=num_opt_out_urls, num_scanned_rows=num_scanned_rows, num_urls=num_urls, full_scan=full_scan), progress) class ConfigOptInOutUrlsCountJobRunner(ConfigJobRunner): @staticmethod def get_job_type() -> str: return 'config-opt-in-out-urls-count' def compute(self) -> JobResult: (response_content, progress) = compute_opt_in_out_urls_count_response(dataset=self.dataset, config=self.config) return JobResult(response_content, progress=progress) # File: dataset-viewer-main/services/worker/src/worker/job_runners/config/parquet.py import logging from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import get_previous_step_or_raise from worker.dtos import CompleteJobResult, ConfigParquetResponse from worker.job_runners.config.config_job_runner import ConfigJobRunner def compute_parquet_response(dataset: str, config: str) -> ConfigParquetResponse: logging.info(f"compute 'config-parquet' for dataset={dataset!r} config={config!r}") previous_step = 'config-parquet-and-info' config_parquet_and_info_response = get_previous_step_or_raise(kind=previous_step, dataset=dataset, config=config) content = config_parquet_and_info_response['content'] try: parquet_files = [parquet_file for parquet_file in content['parquet_files'] if 
parquet_file.get('config') == config] parquet_files.sort(key=lambda x: (x['split'], x['filename'])) if 'features' in content['dataset_info'] and isinstance(content['dataset_info']['features'], dict): features = content['dataset_info']['features'] else: features = None partial = content['partial'] except KeyError as e: raise PreviousStepFormatError("Previous step did not return the expected content: 'parquet_files'.", e) from e return ConfigParquetResponse(parquet_files=parquet_files, features=features, partial=partial) class ConfigParquetJobRunner(ConfigJobRunner): @staticmethod def get_job_type() -> str: return 'config-parquet' def compute(self) -> CompleteJobResult: return CompleteJobResult(compute_parquet_response(dataset=self.dataset, config=self.config)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/config/parquet_metadata.py import functools import logging from typing import Optional from libcommon.constants import PARQUET_REVISION from libcommon.dtos import JobInfo, SplitHubFile from libcommon.exceptions import FileSystemError, ParquetResponseEmptyError, PreviousStepFormatError from libcommon.parquet_utils import extract_split_directory_from_parquet_url from libcommon.simple_cache import get_previous_step_or_raise from libcommon.storage import StrPath from libcommon.viewer_utils.parquet_metadata import create_parquet_metadata_file from pyarrow.parquet import ParquetFile from tqdm.contrib.concurrent import thread_map from worker.config import AppConfig from worker.dtos import CompleteJobResult, ConfigParquetMetadataResponse, ParquetFileMetadataItem from worker.job_runners.config.config_job_runner import ConfigJobRunner from worker.utils import hffs_parquet_url, retry_on_arrow_invalid_open_file def create_parquet_metadata_file_from_remote_parquet(parquet_file_item: SplitHubFile, hf_endpoint: str, hf_token: Optional[str], parquet_metadata_directory: StrPath) -> ParquetFileMetadataItem: split_directory = extract_split_directory_from_parquet_url(parquet_file_item['url']) hfh_parquet_file_path = hffs_parquet_url(repo_id=parquet_file_item['dataset'], config=parquet_file_item['config'], split_directory=split_directory, filename=parquet_file_item['filename']) try: f = retry_on_arrow_invalid_open_file(file_url=hfh_parquet_file_path, hf_endpoint=hf_endpoint, hf_token=hf_token, revision=PARQUET_REVISION) parquet_file_metadata = ParquetFile(f).metadata except Exception as e: raise FileSystemError(f'Could not read the parquet files: {e}') from e split = parquet_file_item['url'].split('/')[-2] parquet_metadata_subpath = create_parquet_metadata_file(dataset=parquet_file_item['dataset'], config=parquet_file_item['config'], split=split, parquet_file_metadata=parquet_file_metadata, filename=parquet_file_item['filename'], parquet_metadata_directory=parquet_metadata_directory) f.close() return ParquetFileMetadataItem(dataset=parquet_file_item['dataset'], config=parquet_file_item['config'], split=parquet_file_item['split'], url=parquet_file_item['url'], filename=parquet_file_item['filename'], size=parquet_file_item['size'], num_rows=parquet_file_metadata.num_rows, parquet_metadata_subpath=parquet_metadata_subpath) def compute_parquet_metadata_response(dataset: str, config: str, hf_endpoint: str, hf_token: Optional[str], parquet_metadata_directory: StrPath) -> ConfigParquetMetadataResponse: logging.info(f"compute 'config-parquet-metadata' for dataset={dataset!r} config={config!r}") config_parquet_response = get_previous_step_or_raise(kind='config-parquet', dataset=dataset, 
config=config) try: parquet_files_content = config_parquet_response['content']['parquet_files'] parquet_file_items: list[SplitHubFile] = [parquet_file_item for parquet_file_item in parquet_files_content if parquet_file_item['config'] == config] if not parquet_file_items: raise ParquetResponseEmptyError('No parquet files found.') content = config_parquet_response['content'] if 'features' in content and isinstance(content['features'], dict): features = content['features'] else: features = None partial = config_parquet_response['content']['partial'] except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.') from e desc = f'{dataset}/{config}' parquet_files_metadata: list[ParquetFileMetadataItem] = thread_map(functools.partial(create_parquet_metadata_file_from_remote_parquet, hf_endpoint=hf_endpoint, hf_token=hf_token, parquet_metadata_directory=parquet_metadata_directory), parquet_file_items, desc=desc, unit='pq', disable=True) return ConfigParquetMetadataResponse(parquet_files_metadata=parquet_files_metadata, features=features, partial=partial) class ConfigParquetMetadataJobRunner(ConfigJobRunner): parquet_metadata_directory: StrPath @staticmethod def get_job_type() -> str: return 'config-parquet-metadata' def __init__(self, job_info: JobInfo, app_config: AppConfig, parquet_metadata_directory: StrPath) -> None: super().__init__(job_info=job_info, app_config=app_config) self.parquet_metadata_directory = parquet_metadata_directory def compute(self) -> CompleteJobResult: return CompleteJobResult(compute_parquet_metadata_response(dataset=self.dataset, config=self.config, hf_endpoint=self.app_config.common.hf_endpoint, hf_token=self.app_config.common.hf_token, parquet_metadata_directory=self.parquet_metadata_directory)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/config/size.py import logging from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import get_previous_step_or_raise from worker.dtos import CompleteJobResult, ConfigSize, ConfigSizeResponse, SplitSize from worker.job_runners.config.config_job_runner import ConfigJobRunner def compute_config_size_response(dataset: str, config: str) -> ConfigSizeResponse: logging.info(f"compute 'config-size' for dataset={dataset!r} config={config!r}") dataset_info_response = get_previous_step_or_raise(kind='config-parquet-and-info', dataset=dataset, config=config) content = dataset_info_response['content'] if 'dataset_info' not in content: raise PreviousStepFormatError("Previous step did not return the expected content: 'dataset_info'.") if not isinstance(content['dataset_info'], dict): raise PreviousStepFormatError('Previous step did not return the expected content.', TypeError(f"dataset_info should be a dict, but got {type(content['dataset_info'])}")) if content['estimated_dataset_info'] is not None and (not isinstance(content['estimated_dataset_info'], dict)): raise PreviousStepFormatError('Previous step did not return the expected content.', TypeError(f"estimated_info should be a dict, but got {type(content['dataset_info'])}")) try: config_info = content['dataset_info'] config_estimated_info = content['estimated_dataset_info'] num_columns = len(config_info['features']) split_sizes: list[SplitSize] = [{'dataset': dataset, 'config': config, 'split': split_info['name'], 'num_bytes_parquet_files': sum((x['size'] for x in content['parquet_files'] if x['config'] == config and x['split'] == split_info['name'])), 'num_bytes_memory': split_info['num_bytes'] 
if 'num_bytes' in split_info else 0, 'num_rows': split_info['num_examples'] if 'num_examples' in split_info else 0, 'num_columns': num_columns, 'estimated_num_rows': config_estimated_info['splits'][split_info['name']]['num_examples'] if isinstance(config_estimated_info, dict) and 'splits' in config_estimated_info and ('name' in split_info) and (split_info['name'] in config_estimated_info['splits']) and ('num_examples' in config_estimated_info['splits'][split_info['name']]) else None} for split_info in config_info['splits'].values()] config_size = ConfigSize({'dataset': dataset, 'config': config, 'num_bytes_original_files': config_info.get('download_size'), 'num_bytes_parquet_files': sum((split_size['num_bytes_parquet_files'] for split_size in split_sizes)), 'num_bytes_memory': sum((split_size['num_bytes_memory'] for split_size in split_sizes)), 'num_rows': sum((split_size['num_rows'] for split_size in split_sizes)), 'num_columns': num_columns, 'estimated_num_rows': sum((split_size['estimated_num_rows'] or split_size['num_rows'] for split_size in split_sizes)) if any((split_size['estimated_num_rows'] for split_size in split_sizes)) else None}) partial = content['partial'] except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e return ConfigSizeResponse({'size': {'config': config_size, 'splits': split_sizes}, 'partial': partial}) class ConfigSizeJobRunner(ConfigJobRunner): @staticmethod def get_job_type() -> str: return 'config-size' def compute(self) -> CompleteJobResult: return CompleteJobResult(compute_config_size_response(dataset=self.dataset, config=self.config)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/config/split_names.py import logging from typing import Optional from datasets import get_dataset_split_names from datasets.data_files import EmptyDatasetError as _EmptyDatasetError from libcommon.dtos import FullSplitItem from libcommon.exceptions import DatasetWithScriptNotSupportedError, DatasetWithTooManySplitsError, EmptyDatasetError, PreviousStepFormatError, SplitNamesFromStreamingError from libcommon.simple_cache import CachedArtifactError, CachedArtifactNotFoundError, get_previous_step_or_raise from worker.dtos import CompleteJobResult, SplitsList from worker.job_runners.config.config_job_runner import ConfigJobRunnerWithDatasetsCache def compute_split_names_from_streaming_response(dataset: str, config: str, max_number: int, hf_token: Optional[str]=None) -> SplitsList: logging.info(f"compute 'config-split-names' using streaming for dataset={dataset!r} config={config!r}") try: split_name_items: list[FullSplitItem] = [{'dataset': dataset, 'config': config, 'split': str(split)} for split in get_dataset_split_names(path=dataset, config_name=config, token=hf_token)] except _EmptyDatasetError as err: raise EmptyDatasetError('The dataset is empty.', cause=err) from err except Exception as err: if isinstance(err, ValueError) and 'trust_remote_code' in str(err): raise DatasetWithScriptNotSupportedError from err raise SplitNamesFromStreamingError(f"Cannot get the split names for the config '{config}' of the dataset.", cause=err) from err if len(split_name_items) > max_number: split_examples = ', '.join([split_name_item['split'] for split_name_item in split_name_items[:5]]) raise DatasetWithTooManySplitsError(f'The {config} config contains {len(split_name_items)} while it should generally contain 3 splits maximum (train/validation/test). If the splits {split_examples}... 
are not used to differentiate between training and evaluation, please consider defining configs of this dataset instead. You can find how to define configs instead of splits here: https://huggingface.co/docs/hub/datasets-data-files-configuration') return SplitsList(splits=split_name_items) def compute_split_names_from_info_response(dataset: str, config: str, max_number: int) -> SplitsList: logging.info(f"compute 'config-split-names' from config-info for dataset={dataset!r} config={config!r}") config_info_response = get_previous_step_or_raise(kind='config-info', dataset=dataset, config=config) try: splits_content = config_info_response['content']['dataset_info']['splits'] except Exception as e: raise PreviousStepFormatError("Previous step 'config-info' did not return the expected content.") from e split_name_items: list[FullSplitItem] = [{'dataset': dataset, 'config': config, 'split': str(split)} for split in splits_content] if len(split_name_items) > max_number: split_examples = ', '.join([split_name_item['split'] for split_name_item in split_name_items[:5]]) raise DatasetWithTooManySplitsError(f'The {config} config contains {len(split_name_items)} splits while it should generally contain 3 splits maximum (train/validation/test). If the splits {split_examples}... are not used to differentiate between training and evaluation, please consider defining configs of this dataset instead. You can find how to define configs instead of splits here: https://huggingface.co/docs/hub/datasets-data-files-configuration') return SplitsList(splits=split_name_items) class ConfigSplitNamesJobRunner(ConfigJobRunnerWithDatasetsCache): @staticmethod def get_job_type() -> str: return 'config-split-names' def compute(self) -> CompleteJobResult: try: return CompleteJobResult(compute_split_names_from_info_response(dataset=self.dataset, config=self.config, max_number=self.app_config.split_names.max_number)) except (CachedArtifactError, CachedArtifactNotFoundError): logging.info(f"Cannot compute 'config-split-names' from config-info for self.dataset={self.dataset!r} self.config={self.config!r}.
Trying to compute it using streaming.") pass return CompleteJobResult(compute_split_names_from_streaming_response(dataset=self.dataset, config=self.config, max_number=self.app_config.split_names.max_number, hf_token=self.app_config.common.hf_token)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/compatible_libraries.py import logging import re from http import HTTPStatus from itertools import islice from pathlib import Path from typing import Any, Callable, Optional import datasets.config import datasets.data_files import yaml from datasets import BuilderConfig, DownloadConfig from datasets.data_files import NON_WORDS_CHARS, DataFilesPatternsDict, DataFilesPatternsList, resolve_pattern from datasets.load import create_builder_configs_from_metadata_configs from datasets.packaged_modules import _MODULE_TO_EXTENSIONS, _PACKAGED_DATASETS_MODULES from datasets.utils.file_utils import cached_path from datasets.utils.metadata import MetadataConfigs from huggingface_hub import DatasetCard, DatasetCardData, HfFileSystem, hf_hub_url from libcommon.constants import LOADING_METHODS_MAX_CONFIGS from libcommon.croissant_utils import get_record_set from libcommon.exceptions import DatasetWithTooComplexDataFilesPatternsError, PreviousStepFormatError from libcommon.simple_cache import get_previous_step_or_raise from worker.dtos import CompatibleLibrary, CompleteJobResult, DatasetCompatibleLibrariesResponse, DatasetFormat, DatasetLibrary, LoadingCode from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunnerWithDatasetsCache BASE_PATTERNS_WITH_SEPARATOR = [pattern for pattern in datasets.data_files.KEYWORDS_IN_FILENAME_BASE_PATTERNS + datasets.data_files.KEYWORDS_IN_DIR_NAME_BASE_PATTERNS if '{sep}' in pattern] NON_WORD_GLOB_SEPARATOR = f'[{NON_WORDS_CHARS}]' NON_WORD_REGEX_SEPARATOR = NON_WORD_GLOB_SEPARATOR.replace('.', '\\.') if any((NON_WORD_GLOB_SEPARATOR not in pattern.format(keyword='train', sep=NON_WORDS_CHARS) for pattern in BASE_PATTERNS_WITH_SEPARATOR)) or not BASE_PATTERNS_WITH_SEPARATOR: raise ImportError(f'Current `datasets` version is not compatible with simplify_data_files_patterns() which expects as keyword separator {NON_WORD_GLOB_SEPARATOR} for glob patterns. Indeed the simplify_data_files_patterns() function is used to create human-readable code snippets with nice glob patterns for files, and therefore it replaces the ugly {NON_WORD_GLOB_SEPARATOR} separator with actual characters, for example\n**/*[-._ 0-9/]train[-._ 0-9/]** => **/*_train_*.jsonl\n\nTo fix this error, please update the simplify_data_files_patterns() to make it support `datasets` new separator and patterns. 
After the fix the get_builder_configs_with_simplified_data_files() should return proper simplified data files on most datasets.') def get_builder_configs_with_simplified_data_files(dataset: str, module_name: str, hf_token: Optional[str]=None) -> list[BuilderConfig]: builder_configs: list[BuilderConfig] base_path = f'hf://datasets/{dataset}' if HfFileSystem().exists(base_path + '/' + dataset.split('/')[-1] + '.py'): raise NotImplementedError('datasets with a script are not supported') download_config = DownloadConfig(token=hf_token) try: dataset_readme_path = cached_path(hf_hub_url(dataset, datasets.config.REPOCARD_FILENAME, repo_type='dataset'), download_config=download_config) dataset_card_data = DatasetCard.load(Path(dataset_readme_path)).data except FileNotFoundError: dataset_card_data = DatasetCardData() try: standalone_yaml_path = cached_path(hf_hub_url(dataset, datasets.config.REPOYAML_FILENAME, repo_type='dataset'), download_config=download_config) with open(standalone_yaml_path, 'r', encoding='utf-8') as f: standalone_yaml_data = yaml.safe_load(f.read()) if standalone_yaml_data: _dataset_card_data_dict = dataset_card_data.to_dict() _dataset_card_data_dict.update(standalone_yaml_data) dataset_card_data = DatasetCardData(**_dataset_card_data_dict) except FileNotFoundError: pass metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) (module_path, _) = _PACKAGED_DATASETS_MODULES[module_name] (builder_configs, _) = create_builder_configs_from_metadata_configs(module_path, metadata_configs or MetadataConfigs({'default': {}}), supports_metadata=False, base_path=base_path, download_config=download_config) for config in builder_configs: data_files = config.data_files.resolve(base_path=base_path, download_config=download_config) config.data_files = DataFilesPatternsDict({str(split): simplify_data_files_patterns(data_files_patterns=config.data_files[split], base_path=base_path, download_config=download_config, allowed_extensions=_MODULE_TO_EXTENSIONS[module_name]) for split in data_files}) return builder_configs def simplify_data_files_patterns(data_files_patterns: DataFilesPatternsList, base_path: str, download_config: DownloadConfig, allowed_extensions: list[str]) -> DataFilesPatternsList: patterns = DataFilesPatternsList([], allowed_extensions=None) for pattern in data_files_patterns: if pattern == '**': pattern = '**/*' try: resolved_data_files = resolve_pattern(pattern, base_path=base_path, download_config=download_config, allowed_extensions=allowed_extensions) except FileNotFoundError: continue if len(resolved_data_files) == 1: return [resolved_data_files[0][len(base_path) + 1:]] if resolved_data_files: if '[0-9]' * 5 in pattern: new_pattern = pattern.replace('[0-9]' * 5 + '*', '*') new_pattern = new_pattern.replace('[0-9]' * 5, '*') try: re_resolved_data_files = resolve_pattern(new_pattern, base_path=base_path, download_config=download_config, allowed_extensions=allowed_extensions) except FileNotFoundError: continue if len(resolved_data_files) == len(re_resolved_data_files): pattern = new_pattern if NON_WORD_GLOB_SEPARATOR in pattern: re_match = re.match(pattern.replace('**/*', '.*').replace('*', '.*').replace(NON_WORD_GLOB_SEPARATOR, f'({NON_WORD_REGEX_SEPARATOR})'), resolved_data_files[0]) if re_match: new_pattern = pattern for non_word_char in re_match.groups(): if non_word_char in '1234567890': non_word_char = '[0-9]' new_pattern = new_pattern.replace(NON_WORD_GLOB_SEPARATOR, non_word_char, 1) try: re_resolved_data_files = resolve_pattern(new_pattern, 
base_path=base_path, download_config=download_config, allowed_extensions=allowed_extensions) except FileNotFoundError: continue if len(resolved_data_files) == len(re_resolved_data_files): pattern = new_pattern for allowed_extension in allowed_extensions: new_pattern = pattern if new_pattern.endswith('.**'): new_pattern = new_pattern[:-3] + allowed_extension elif new_pattern.endswith('**'): new_pattern = new_pattern[:-1] + allowed_extension elif new_pattern.endswith('.*'): new_pattern = new_pattern[:-2] + allowed_extension elif new_pattern.endswith('*'): new_pattern = new_pattern + allowed_extension try: re_resolved_data_files = resolve_pattern(new_pattern, base_path=base_path, download_config=download_config) except FileNotFoundError: new_pattern += '.' + resolved_data_files[0].split('.')[-1] try: re_resolved_data_files = resolve_pattern(new_pattern, base_path=base_path, download_config=download_config) except FileNotFoundError: continue if len(resolved_data_files) == len(re_resolved_data_files): pattern = new_pattern patterns.append(pattern.replace('**.', '*.')) return patterns LOGIN_COMMENT = '\n# Login using e.g. `huggingface-cli login` to access this dataset' DATASETS_CODE = 'from datasets import load_dataset\n{comment}\nds = load_dataset("{dataset}")' DATASETS_CODE_CONFIGS = 'from datasets import load_dataset\n{comment}\nds = load_dataset("{dataset}", "{config_name}")' MLCROISSANT_CODE_RECORD_SETS = 'from mlcroissant import Dataset\n{comment}\nds = Dataset(jsonld="https://huggingface.co/api/datasets/{dataset}/croissant")\nrecords = ds.records("{record_set}")' MLCROISSANT_CODE_RECORD_SETS_WITH_LOGIN = 'import requests\nfrom huggingface_hub.file_download import build_hf_headers\nfrom mlcroissant import Dataset\n{comment}\nheaders = build_hf_headers() # handles authentication\njsonld = requests.get("https://huggingface.co/api/datasets/{dataset}/croissant", headers=headers).json()\nds = Dataset(jsonld=jsonld)\nrecords = ds.records("{record_set}")' def get_hf_datasets_compatible_library(dataset: str, infos: list[dict[str, Any]], login_required: bool) -> CompatibleLibrary: return {'language': 'python', 'library': 'datasets', 'function': 'load_dataset', 'loading_codes': [{'config_name': info['config_name'], 'arguments': {'config_name': info['config_name']} if len(infos) > 1 else {}, 'code': DATASETS_CODE_CONFIGS.format(dataset=dataset, config_name=info['config_name'], comment=LOGIN_COMMENT if login_required else '') if len(infos) > 1 else DATASETS_CODE.format(dataset=dataset, comment=LOGIN_COMMENT if login_required else '')} for info in infos]} def get_mlcroissant_compatible_library(dataset: str, infos: list[dict[str, Any]], login_required: bool, partial: bool) -> CompatibleLibrary: comment = '\n# The Croissant metadata exposes the first 5GB of this dataset' if partial else '' if login_required: comment += LOGIN_COMMENT return {'language': 'python', 'library': 'mlcroissant', 'function': 'Dataset', 'loading_codes': [{'config_name': info['config_name'], 'arguments': {'record_set': get_record_set(dataset=dataset, config_name=info['config_name']), 'partial': partial}, 'code': MLCROISSANT_CODE_RECORD_SETS_WITH_LOGIN.format(dataset=dataset, record_set=get_record_set(dataset=dataset, config_name=info['config_name']), comment=comment) if login_required else MLCROISSANT_CODE_RECORD_SETS.format(dataset=dataset, record_set=get_record_set(dataset=dataset, config_name=info['config_name']), comment=comment)} for info in infos]} PANDAS_CODE = 'import pandas as pd\n{comment}\ndf = 
{function}("hf://datasets/{dataset}/{data_file}"{args})' PANDAS_CODE_SPLITS = 'import pandas as pd\n{comment}\nsplits = {splits}\ndf = {function}("hf://datasets/{dataset}/" + splits["{first_split}"{args}])' DASK_CODE = 'import dask.dataframe as dd\n{comment}\ndf = {function}("hf://datasets/{dataset}/{pattern}")' DASK_CODE_SPLITS = 'import dask.dataframe as dd\n{comment}\nsplits = {splits}\ndf = {function}("hf://datasets/{dataset}/" + splits["{first_split}"])' WEBDATASET_CODE = 'import webdataset as wds\nfrom huggingface_hub import HfFileSystem, get_token, hf_hub_url\n{comment}\nfs = HfFileSystem()\nfiles = [fs.resolve_path(path) for path in fs.glob("hf://datasets/{dataset}/{pattern}")]\nurls = [hf_hub_url(file.repo_id, file.path_in_repo, repo_type="dataset") for file in files]\nurls = f"pipe: curl -s -L -H \'Authorization:Bearer {{get_token()}}\' {{\'::\'.join(urls)}}"\n\nds = {function}(urls).decode()' WEBDATASET_CODE_SPLITS = 'import webdataset as wds\nfrom huggingface_hub import HfFileSystem, get_token, hf_hub_url\n\nsplits = {splits}\n{comment}\nfs = HfFileSystem()\nfiles = [fs.resolve_path(path) for path in fs.glob("hf://datasets/{dataset}/" + splits["{first_split}"])]\nurls = [hf_hub_url(file.repo_id, file.path_in_repo, repo_type="dataset") for file in files]\nurls = f"pipe: curl -s -L -H \'Authorization:Bearer {{get_token()}}\' {{\'::\'.join(urls)}}"\n\nds = {function}(urls).decode()' def get_compatible_libraries_for_json(dataset: str, hf_token: Optional[str], login_required: bool) -> CompatibleLibrary: library: DatasetLibrary builder_configs = get_builder_configs_with_simplified_data_files(dataset, module_name='json', hf_token=hf_token) for config in builder_configs: if any((len(data_files) != 1 for data_files in config.data_files.values())): raise DatasetWithTooComplexDataFilesPatternsError(f'Failed to simplify json data files pattern: {config.data_files}') loading_codes: list[LoadingCode] = [{'config_name': config.name, 'arguments': {'splits': {str(split): data_files[0] for (split, data_files) in config.data_files.items()}}, 'code': ''} for config in builder_configs] is_single_file = all(('*' not in data_file and '[' not in data_file for loading_code in loading_codes for data_file in loading_code['arguments']['splits'].values())) comment = LOGIN_COMMENT if login_required else '' if is_single_file: library = 'pandas' function = 'pd.read_json' for loading_code in loading_codes: first_file = f'datasets/{dataset}/' + next(iter(loading_code['arguments']['splits'].values())) if '.jsonl' in first_file or HfFileSystem(token=hf_token).open(first_file, 'r').read(1) != '[': args = ', lines=True' loading_code['arguments']['lines'] = True else: args = '' if len(loading_code['arguments']['splits']) == 1: data_file = next(iter(loading_code['arguments']['splits'].values())) loading_code['code'] = PANDAS_CODE.format(function=function, dataset=dataset, data_file=data_file, args=args, comment=comment) else: loading_code['code'] = PANDAS_CODE_SPLITS.format(function=function, dataset=dataset, splits=loading_code['arguments']['splits'], first_split=next(iter(loading_code['arguments']['splits'])), args=args, comment=comment) else: library = 'dask' function = 'dd.read_json' for loading_code in loading_codes: if len(loading_code['arguments']['splits']) == 1: pattern = next(iter(loading_code['arguments']['splits'].values())) loading_code['code'] = DASK_CODE.format(function=function, dataset=dataset, pattern=pattern, comment=comment) else: loading_code['code'] = DASK_CODE_SPLITS.format(function=function, 
dataset=dataset, splits=loading_code['arguments']['splits'], first_split=next(iter(loading_code['arguments']['splits'])), comment=comment) return {'language': 'python', 'library': library, 'function': function, 'loading_codes': loading_codes} def get_compatible_libraries_for_csv(dataset: str, hf_token: Optional[str], login_required: bool) -> CompatibleLibrary: library: DatasetLibrary builder_configs = get_builder_configs_with_simplified_data_files(dataset, module_name='csv', hf_token=hf_token) for config in builder_configs: if any((len(data_files) != 1 for data_files in config.data_files.values())): raise DatasetWithTooComplexDataFilesPatternsError(f'Failed to simplify csv data files pattern: {config.data_files}') loading_codes: list[LoadingCode] = [{'config_name': config.name, 'arguments': {'splits': {str(split): data_files[0] for (split, data_files) in config.data_files.items()}}, 'code': ''} for config in builder_configs] is_single_file = all(('*' not in data_file and '[' not in data_file for loading_code in loading_codes for data_file in loading_code['arguments']['splits'].values())) comment = LOGIN_COMMENT if login_required else '' if is_single_file: library = 'pandas' function = 'pd.read_csv' for loading_code in loading_codes: first_file = next(iter(loading_code['arguments']['splits'].values())) if '.tsv' in first_file: args = ', sep="\\t"' else: args = '' if len(loading_code['arguments']['splits']) == 1: data_file = next(iter(loading_code['arguments']['splits'].values())) loading_code['code'] = PANDAS_CODE.format(function=function, dataset=dataset, data_file=data_file, args=args, comment=comment) else: loading_code['code'] = PANDAS_CODE_SPLITS.format(function=function, dataset=dataset, splits=loading_code['arguments']['splits'], first_split=next(iter(loading_code['arguments']['splits'])), args=args, comment=comment) else: library = 'dask' function = 'dd.read_csv' for loading_code in loading_codes: if len(loading_code['arguments']['splits']) == 1: pattern = next(iter(loading_code['arguments']['splits'].values())) loading_code['code'] = DASK_CODE.format(function=function, dataset=dataset, pattern=pattern, comment=comment) else: loading_code['code'] = DASK_CODE_SPLITS.format(function=function, dataset=dataset, splits=loading_code['arguments']['splits'], first_split=next(iter(loading_code['arguments']['splits'])), comment=comment) return {'language': 'python', 'library': library, 'function': function, 'loading_codes': loading_codes} def get_compatible_libraries_for_parquet(dataset: str, hf_token: Optional[str], login_required: bool) -> CompatibleLibrary: library: DatasetLibrary builder_configs = get_builder_configs_with_simplified_data_files(dataset, module_name='parquet', hf_token=hf_token) for config in builder_configs: if any((len(data_files) != 1 for data_files in config.data_files.values())): raise DatasetWithTooComplexDataFilesPatternsError(f'Failed to simplify parquet data files pattern: {config.data_files}') loading_codes: list[LoadingCode] = [{'config_name': config.name, 'arguments': {'splits': {str(split): data_files[0] for (split, data_files) in config.data_files.items()}}, 'code': ''} for config in builder_configs] is_single_file = all(('*' not in data_file and '[' not in data_file for loading_code in loading_codes for data_file in loading_code['arguments']['splits'].values())) comment = LOGIN_COMMENT if login_required else '' if is_single_file: library = 'pandas' function = 'pd.read_parquet' for loading_code in loading_codes: if len(loading_code['arguments']['splits']) == 
1: data_file = next(iter(loading_code['arguments']['splits'].values())) loading_code['code'] = PANDAS_CODE.format(function=function, dataset=dataset, data_file=data_file, args='', comment=comment) else: loading_code['code'] = PANDAS_CODE_SPLITS.format(function=function, dataset=dataset, splits=loading_code['arguments']['splits'], first_split=next(iter(loading_code['arguments']['splits'])), args='', comment=comment) else: library = 'dask' function = 'dd.read_parquet' for loading_code in loading_codes: if len(loading_code['arguments']['splits']) == 1: pattern = next(iter(loading_code['arguments']['splits'].values())) loading_code['code'] = DASK_CODE.format(function=function, dataset=dataset, pattern=pattern, comment=comment) else: loading_code['code'] = DASK_CODE_SPLITS.format(function=function, dataset=dataset, splits=loading_code['arguments']['splits'], first_split=next(iter(loading_code['arguments']['splits'])), comment=comment) return {'language': 'python', 'library': library, 'function': function, 'loading_codes': loading_codes} def get_compatible_libraries_for_webdataset(dataset: str, hf_token: Optional[str], login_required: bool) -> CompatibleLibrary: library: DatasetLibrary builder_configs = get_builder_configs_with_simplified_data_files(dataset, module_name='webdataset', hf_token=hf_token) for config in builder_configs: if any((len(data_files) != 1 for data_files in config.data_files.values())): raise DatasetWithTooComplexDataFilesPatternsError(f'Failed to simplify webdataset data files pattern: {config.data_files}') loading_codes: list[LoadingCode] = [{'config_name': config.name, 'arguments': {'splits': {str(split): data_files[0] for (split, data_files) in config.data_files.items()}}, 'code': ''} for config in builder_configs] library = 'webdataset' function = 'wds.WebDataset' comment = LOGIN_COMMENT if login_required else '' for loading_code in loading_codes: if len(loading_code['arguments']['splits']) == 1: pattern = next(iter(loading_code['arguments']['splits'].values())) loading_code['code'] = WEBDATASET_CODE.format(function=function, dataset=dataset, pattern=pattern, comment=comment) else: loading_code['code'] = WEBDATASET_CODE_SPLITS.format(function=function, dataset=dataset, splits=loading_code['arguments']['splits'], first_split=next(iter(loading_code['arguments']['splits'])), comment=comment) return {'language': 'python', 'library': library, 'function': function, 'loading_codes': loading_codes} def get_polars_compatible_library(builder_name: str, dataset: str, hf_token: Optional[str], login_required: bool) -> Optional[CompatibleLibrary]: if builder_name in ['parquet', 'csv', 'json']: builder_configs = get_builder_configs_with_simplified_data_files(dataset, module_name=builder_name, hf_token=hf_token) for config in builder_configs: if any((len(data_files) != 1 for data_files in config.data_files.values())): raise DatasetWithTooComplexDataFilesPatternsError(f'Failed to simplify parquet data files pattern: {config.data_files}') loading_codes: list[LoadingCode] = [{'config_name': config.name, 'arguments': {'splits': {str(split): data_files[0] for (split, data_files) in config.data_files.items()}}, 'code': ''} for config in builder_configs] compatible_library: CompatibleLibrary = {'language': 'python', 'library': 'polars', 'function': '', 'loading_codes': []} def fmt_code(*, read_func: str, splits: dict[str, str], args: str, dataset: str=dataset, login_required: bool=login_required) -> str: if not (args.startswith(', ') or args == ''): msg = f'incorrect args format: args = 
{args!s}' raise ValueError(msg) login_comment = LOGIN_COMMENT if login_required else '' if len(splits) == 1: path = next(iter(splits.values())) return f"import polars as pl\n{login_comment}\ndf = pl.{read_func}('hf://datasets/{dataset}/{path}'{args})\n" else: first_split = next(iter(splits)) return f"import polars as pl\n{login_comment}\nsplits = {splits}\ndf = pl.{read_func}('hf://datasets/{dataset}/' + splits['{first_split}']{args})\n" args = '' if builder_name == 'parquet': read_func = 'read_parquet' compatible_library['function'] = f'pl.{read_func}' elif builder_name == 'csv': read_func = 'read_csv' compatible_library['function'] = f'pl.{read_func}' first_file = next(iter(loading_codes[0]['arguments']['splits'].values())) if '.tsv' in first_file: args = f"{args}, separator='\\t'" elif builder_name == 'json': first_file = f'datasets/{dataset}/' + next(iter(loading_codes[0]['arguments']['splits'].values())) if '*' in first_file: return None is_json_lines = '.jsonl' in first_file or HfFileSystem(token=hf_token).open(first_file, 'r').read(1) != '[' if is_json_lines: read_func = 'read_ndjson' else: read_func = 'read_json' compatible_library['function'] = f'pl.{read_func}' else: return None for loading_code in loading_codes: splits = loading_code['arguments']['splits'] loading_code['code'] = fmt_code(read_func=read_func, splits=splits, args=args) compatible_library['loading_codes'] = loading_codes return compatible_library get_compatible_library_for_builder: dict[str, Callable[[str, Optional[str], bool], CompatibleLibrary]] = {'webdataset': get_compatible_libraries_for_webdataset, 'json': get_compatible_libraries_for_json, 'csv': get_compatible_libraries_for_csv, 'parquet': get_compatible_libraries_for_parquet} get_format_for_builder: dict[str, DatasetFormat] = {'webdataset': 'webdataset', 'json': 'json', 'csv': 'csv', 'parquet': 'parquet', 'imagefolder': 'imagefolder', 'audiofolder': 'audiofolder', 'text': 'text', 'arrow': 'arrow'} def compute_compatible_libraries_response(dataset: str, hf_token: Optional[str]=None) -> DatasetCompatibleLibrariesResponse: logging.info(f"compute 'dataset-compatible-libraries' for dataset={dataset!r}") dataset_info_response = get_previous_step_or_raise(kind='dataset-info', dataset=dataset) http_status = dataset_info_response['http_status'] login_required = True try: login_required = not HfFileSystem(token='no_token').isdir('datasets/' + dataset) except NotImplementedError: pass libraries: list[CompatibleLibrary] = [] formats: list[DatasetFormat] = [] infos: list[dict[str, Any]] = [] builder_name: Optional[str] = None if http_status == HTTPStatus.OK: try: content = dataset_info_response['content'] infos = list(islice(content['dataset_info'].values(), LOADING_METHODS_MAX_CONFIGS)) partial = content['partial'] except KeyError as e: raise PreviousStepFormatError("Previous step 'dataset-info' did not return the expected content.", e) from e if infos: libraries.append(get_hf_datasets_compatible_library(dataset, infos=infos, login_required=login_required)) builder_name = infos[0]['builder_name'] if builder_name in get_format_for_builder: formats.append(get_format_for_builder[builder_name]) if builder_name in get_compatible_library_for_builder: try: compatible_library = get_compatible_library_for_builder[builder_name](dataset, hf_token, login_required) libraries.append(compatible_library) except NotImplementedError: pass libraries.append(get_mlcroissant_compatible_library(dataset, infos, login_required=login_required, partial=partial)) if isinstance(builder_name, str) 
and (v := get_polars_compatible_library(builder_name, dataset, hf_token=hf_token, login_required=login_required)) is not None: libraries.append(v) return DatasetCompatibleLibrariesResponse(libraries=libraries, formats=formats) class DatasetCompatibleLibrariesJobRunner(DatasetJobRunnerWithDatasetsCache): @staticmethod def get_job_type() -> str: return 'dataset-compatible-libraries' def compute(self) -> CompleteJobResult: response_content = compute_compatible_libraries_response(dataset=self.dataset, hf_token=self.app_config.common.hf_token) return CompleteJobResult(response_content) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/config_names.py import logging from typing import Optional from datasets import get_dataset_config_names, get_dataset_default_config_name from datasets.data_files import EmptyDatasetError as _EmptyDatasetError from datasets.exceptions import DataFilesNotFoundError as _DataFilesNotFoundError from datasets.exceptions import DatasetNotFoundError from huggingface_hub.utils import HfHubHTTPError from libcommon.exceptions import ConfigNamesError, DataFilesNotFoundError, DatasetWithScriptNotSupportedError, DatasetWithTooManyConfigsError, EmptyDatasetError, FileFormatMismatchBetweenSplitsError, RetryableConfigNamesError from worker.dtos import CompleteJobResult, ConfigNameItem, DatasetConfigNamesResponse from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunnerWithDatasetsCache def compute_config_names_response(dataset: str, max_number: int, hf_token: Optional[str]=None) -> DatasetConfigNamesResponse: logging.info(f"compute 'dataset-config-names' for dataset={dataset!r}") try: default_config_name: Optional[str] = None config_names = get_dataset_config_names(path=dataset, token=hf_token) if len(config_names) > 1: default_config_name = get_dataset_default_config_name(path=dataset, token=hf_token) config_name_items: list[ConfigNameItem] = [{'dataset': dataset, 'config': str(config)} for config in sorted(config_names, key=lambda config_name: (config_name != default_config_name, config_name))] except _EmptyDatasetError as err: raise EmptyDatasetError('The dataset is empty.', cause=err) from err except _DataFilesNotFoundError as err: raise DataFilesNotFoundError(str(err), cause=err) from err except ValueError as err: if 'trust_remote_code' in str(err): raise DatasetWithScriptNotSupportedError from err if "Couldn't infer the same data file format for all splits" in str(err): raise FileFormatMismatchBetweenSplitsError(str(err), cause=err) from err raise ConfigNamesError('Cannot get the config names for the dataset.', cause=err) from err except (HfHubHTTPError, BrokenPipeError, DatasetNotFoundError, PermissionError, ConnectionError) as err: raise RetryableConfigNamesError('Cannot get the config names for the dataset.', cause=err) from err except Exception as err: raise ConfigNamesError('Cannot get the config names for the dataset.', cause=err) from err number_of_configs = len(config_name_items) if number_of_configs > max_number: raise DatasetWithTooManyConfigsError(f'The maximum number of configs allowed is {max_number}, dataset has {number_of_configs} configs.') return DatasetConfigNamesResponse(config_names=config_name_items) class DatasetConfigNamesJobRunner(DatasetJobRunnerWithDatasetsCache): @staticmethod def get_job_type() -> str: return 'dataset-config-names' def compute(self) -> CompleteJobResult: return CompleteJobResult(compute_config_names_response(dataset=self.dataset, hf_token=self.app_config.common.hf_token, 
max_number=self.app_config.config_names.max_number)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/croissant_crumbs.py import logging import re from collections.abc import Mapping from itertools import islice from typing import Any from datasets import Features from libcommon.constants import CROISSANT_MAX_CONFIGS from libcommon.croissant_utils import feature_to_croissant_field, get_record_set from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import get_previous_step_or_raise from worker.dtos import CompleteJobResult from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner NAME_PATTERN_REGEX = '[^a-zA-Z0-9\\-_\\.]' def _escape_name(name: str, names: set[str]) -> str: escaped_name = re.sub(NAME_PATTERN_REGEX, '_', name) while escaped_name in names: escaped_name = f'{escaped_name}_0' names.add(escaped_name) return escaped_name def _remove_none_values(json: Mapping[str, Any]) -> Mapping[str, Any]: return {k: v for (k, v) in json.items() if v is not None} def get_croissant_crumbs_from_dataset_infos(dataset: str, infos: list[Mapping[str, Any]], partial: bool, truncated_configs: bool) -> Mapping[str, Any]: repo_name = 'repo' names: set[str] = set(repo_name) distribution = [_remove_none_values({'@type': 'cr:FileObject', '@id': repo_name, 'name': repo_name, 'description': 'The Hugging Face git repository.', 'contentUrl': f'https://huggingface.co/datasets/{dataset}/tree/refs%2Fconvert%2Fparquet', 'encodingFormat': 'git+https', 'sha256': 'https://github.com/mlcommons/croissant/issues/80'})] record_set = [] for info in infos: description_body = '' config = info['config_name'] features = Features.from_dict(info['features']) fields: list[dict[str, Any]] = [] splits = list(info['splits']) distribution_name = _escape_name(f'parquet-files-for-config-{config}', names) distribution.append(_remove_none_values({'@type': 'cr:FileSet', '@id': distribution_name, 'name': distribution_name, 'description': 'The underlying Parquet files as converted by Hugging Face (see: https://huggingface.co/docs/dataset-viewer/parquet).', 'containedIn': {'@id': repo_name}, 'encodingFormat': 'application/x-parquet', 'includes': f'{config}/*/*.parquet'})) skipped_columns = [] record_set_name = get_record_set(dataset=dataset, config_name=config) record_set_name = _escape_name(record_set_name, names) for (column, feature) in features.items(): fields_names: set[str] = set() field_name = f'{record_set_name}/{_escape_name(column, fields_names)}' field = feature_to_croissant_field(distribution_name, field_name, column, feature) if field: fields.append(field) else: skipped_columns.append(column) description = f"{dataset} - '{config}' subset" if partial: description += ' (first 5GB)' if truncated_configs: description += f' (only the first {CROISSANT_MAX_CONFIGS} subsets are included in this metadata)' if len(splits) > 1: description_body += f"\n- {len(splits)} split{('s' if len(splits) > 1 else '')}: {', '.join(splits)}" if skipped_columns: description_body += f"\n- {len(skipped_columns)} skipped column{('s' if len(skipped_columns) > 1 else '')}: {', '.join(skipped_columns)}" if description_body: description += '\n\nAdditional information:' description += description_body record_set.append(_remove_none_values({'@type': 'cr:RecordSet', '@id': record_set_name, 'name': record_set_name, 'description': description, 'field': fields})) context = {'@language': 'en', '@vocab': 'https://schema.org/', 'citeAs': 'cr:citeAs', 'column': 'cr:column', 'conformsTo': 
'dct:conformsTo', 'cr': 'http://mlcommons.org/croissant/', 'data': {'@id': 'cr:data', '@type': '@json'}, 'dataBiases': 'cr:dataBiases', 'dataCollection': 'cr:dataCollection', 'dataType': {'@id': 'cr:dataType', '@type': '@vocab'}, 'dct': 'http://purl.org/dc/terms/', 'extract': 'cr:extract', 'field': 'cr:field', 'fileProperty': 'cr:fileProperty', 'fileObject': 'cr:fileObject', 'fileSet': 'cr:fileSet', 'format': 'cr:format', 'includes': 'cr:includes', 'isLiveDataset': 'cr:isLiveDataset', 'jsonPath': 'cr:jsonPath', 'key': 'cr:key', 'md5': 'cr:md5', 'parentField': 'cr:parentField', 'path': 'cr:path', 'personalSensitiveInformation': 'cr:personalSensitiveInformation', 'recordSet': 'cr:recordSet', 'references': 'cr:references', 'regex': 'cr:regex', 'repeated': 'cr:repeated', 'replace': 'cr:replace', 'sc': 'https://schema.org/', 'separator': 'cr:separator', 'source': 'cr:source', 'subField': 'cr:subField', 'transform': 'cr:transform'} return _remove_none_values({'@context': context, '@type': 'sc:Dataset', 'conformsTo': 'http://mlcommons.org/croissant/1.0', 'distribution': distribution, 'recordSet': record_set}) def compute_croissant_crumbs_response(dataset: str) -> Mapping[str, Any]: logging.info(f"compute 'dataset-croissant-crumbs' for dataset={dataset!r}") dataset_info_response = get_previous_step_or_raise(kind='dataset-info', dataset=dataset) try: content = dataset_info_response['content'] truncated_configs = len(content['dataset_info']) > CROISSANT_MAX_CONFIGS infos = list(islice(content['dataset_info'].values(), CROISSANT_MAX_CONFIGS)) partial = content['partial'] croissant_crumbs = get_croissant_crumbs_from_dataset_infos(dataset=dataset, infos=infos, partial=partial, truncated_configs=truncated_configs) except KeyError as e: raise PreviousStepFormatError("Previous step 'dataset-info' did not return the expected content.", e) from e return croissant_crumbs class DatasetCroissantCrumbsJobRunner(DatasetJobRunner): @staticmethod def get_job_type() -> str: return 'dataset-croissant-crumbs' def compute(self) -> CompleteJobResult: response_content = compute_croissant_crumbs_response(dataset=self.dataset) return CompleteJobResult(response_content) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/dataset_job_runner.py from pathlib import Path from libcommon.dtos import JobInfo from libcommon.exceptions import ParameterMissingError from worker.config import AppConfig from worker.job_runner import JobRunner from worker.job_runners._job_runner_with_datasets_cache import JobRunnerWithDatasetsCache class DatasetJobRunner(JobRunner): dataset: str dataset_git_revision: str def __init__(self, job_info: JobInfo, app_config: AppConfig) -> None: super().__init__(job_info=job_info, app_config=app_config) if job_info['params']['dataset'] is None: raise ParameterMissingError("'dataset' parameter is required") if job_info['params']['revision'] is None: raise ParameterMissingError("'revision' parameter is required") self.dataset = job_info['params']['dataset'] self.dataset_git_revision = job_info['params']['revision'] class DatasetJobRunnerWithDatasetsCache(JobRunnerWithDatasetsCache, DatasetJobRunner): def __init__(self, job_info: JobInfo, app_config: AppConfig, hf_datasets_cache: Path) -> None: JobRunnerWithDatasetsCache.__init__(self=self, job_info=job_info, app_config=app_config, hf_datasets_cache=hf_datasets_cache) DatasetJobRunner.__init__(self=self, job_info=job_info, app_config=app_config) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/duckdb_index_size.py 
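# Illustrative aside (not part of the original files; the duckdb_index_size.py module continues below):
# a minimal sketch of how the DatasetJobRunner base class defined in dataset_job_runner.py above is
# typically extended. The 'dataset-echo' job type and its response content are hypothetical.
from worker.dtos import CompleteJobResult
from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner

class DatasetEchoJobRunner(DatasetJobRunner):
    @staticmethod
    def get_job_type() -> str:
        # the identifier under which this (hypothetical) step would be registered in the processing graph
        return 'dataset-echo'

    def compute(self) -> CompleteJobResult:
        # self.dataset and self.dataset_git_revision are validated and set by DatasetJobRunner.__init__
        return CompleteJobResult({'dataset': self.dataset, 'revision': self.dataset_git_revision})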
import logging from http import HTTPStatus from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_previous_step_or_raise, get_response from worker.dtos import ConfigDuckdbIndexSize, ConfigDuckdbIndexSizeResponse, DatasetDuckdbIndexSize, DatasetDuckdbIndexSizeResponse, JobResult, PreviousJob, SplitDuckdbIndexSize from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner def compute_dataset_duckdb_index_size_response(dataset: str) -> tuple[DatasetDuckdbIndexSizeResponse, float]: logging.info(f"compute 'config-duckdb-index-size' for dataset={dataset!r}") config_names_response = get_previous_step_or_raise(kind='dataset-config-names', dataset=dataset) content = config_names_response['content'] if 'config_names' not in content: raise PreviousStepFormatError("Previous step did not return the expected content: 'config_names'.") try: split_duckdb_index_sizes: list[SplitDuckdbIndexSize] = [] config_duckdb_index_sizes: list[ConfigDuckdbIndexSize] = [] total = 0 pending = [] failed = [] partial = False for config_item in content['config_names']: config = config_item['config'] total += 1 try: response = get_response(kind='config-duckdb-index-size', dataset=dataset, config=config) except CachedArtifactNotFoundError: logging.debug("No response found in previous step for this dataset: 'config-duckdb-index-size' endpoint.") pending.append(PreviousJob({'kind': 'config-duckdb-index-size', 'dataset': dataset, 'config': config, 'split': None})) continue if response['http_status'] != HTTPStatus.OK: logging.debug(f"Previous step gave an error: {response['http_status']}.") failed.append(PreviousJob({'kind': 'config-duckdb-index-size', 'dataset': dataset, 'config': config, 'split': None})) continue config_size_content = ConfigDuckdbIndexSizeResponse(size=response['content']['size'], partial=response['content']['partial']) config_duckdb_index_sizes.append(config_size_content['size']['config']) split_duckdb_index_sizes.extend(config_size_content['size']['splits']) partial = partial or config_size_content['partial'] dataset_duckdb_index_size: DatasetDuckdbIndexSize = {'dataset': dataset, 'has_fts': any((config_duckdb_index_size['has_fts'] for config_duckdb_index_size in config_duckdb_index_sizes)), 'num_rows': sum((config_duckdb_index_size['num_rows'] for config_duckdb_index_size in config_duckdb_index_sizes)), 'num_bytes': sum((config_duckdb_index_size['num_bytes'] for config_duckdb_index_size in config_duckdb_index_sizes))} except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e progress = (total - len(pending)) / total if total else 1.0 return (DatasetDuckdbIndexSizeResponse({'size': {'dataset': dataset_duckdb_index_size, 'configs': config_duckdb_index_sizes, 'splits': split_duckdb_index_sizes}, 'pending': pending, 'failed': failed, 'partial': partial}), progress) class DatasetDuckdbIndexSizeJobRunner(DatasetJobRunner): @staticmethod def get_job_type() -> str: return 'dataset-duckdb-index-size' def compute(self) -> JobResult: (response_content, progress) = compute_dataset_duckdb_index_size_response(dataset=self.dataset) return JobResult(response_content, progress=progress) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/filetypes.py import logging from collections import Counter from typing import Optional from datasets import DownloadConfig, StreamingDownloadManager from datasets.utils.file_utils import xbasename from huggingface_hub.hf_api 
import HfApi, RepoSibling from huggingface_hub.utils import RepositoryNotFoundError from libcommon.exceptions import DatasetNotFoundError from worker.dtos import CompleteJobResult, DatasetFiletypesResponse, Filetype from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunnerWithDatasetsCache from worker.utils import FileExtensionTuple, get_file_extension def extension_to_filetype(file_extension_tuple: FileExtensionTuple, count: int) -> Filetype: if file_extension_tuple[1]: return Filetype(extension=file_extension_tuple[0], count=count, compressed_in=file_extension_tuple[1]) return Filetype(extension=file_extension_tuple[0], count=count) def get_filetypes(siblings: list[RepoSibling]) -> list[Filetype]: counter = Counter[FileExtensionTuple]((t for sibling in siblings for t in get_file_extension(sibling.rfilename).get_tuples())) return [extension_to_filetype(k, v) for (k, v) in counter.items()] def get_counter_from_archive(dataset: str, archive_filename: str, hf_token: Optional[str]=None) -> Counter[FileExtensionTuple]: dl_manager = StreamingDownloadManager(download_config=DownloadConfig(token=hf_token)) base_url = f'hf://datasets/{dataset}/' archived_in = get_file_extension(archive_filename, recursive=False, clean=False).extension return Counter[FileExtensionTuple](((get_file_extension(xbasename(filename), recursive=False, clean=False).extension, archived_in) for filename in dl_manager.iter_files(dl_manager.extract(base_url + archive_filename)))) def get_filetypes_from_archives(dataset: str, archive_filenames: list[str], hf_token: Optional[str]=None) -> list[Filetype]: counter = Counter[FileExtensionTuple]() for archive_filename in archive_filenames: counter.update(get_counter_from_archive(dataset=dataset, archive_filename=archive_filename, hf_token=hf_token)) return [Filetype(extension=extension, count=v, archived_in=archived_in) if archived_in else Filetype(extension=extension, count=v) for ((extension, archived_in), v) in counter.items()] def compute_filetypes_response(dataset: str, hf_endpoint: str, hf_token: Optional[str]=None) -> DatasetFiletypesResponse: logging.info(f"compute 'dataset-filetypes' for dataset={dataset!r}") try: info = HfApi(endpoint=hf_endpoint, token=hf_token).dataset_info(dataset) except RepositoryNotFoundError as err: raise DatasetNotFoundError(f'Cannot get the dataset info for dataset={dataset!r}') from err filetypes = get_filetypes(info.siblings) SUPPORTED_ARCHIVE_EXTENSIONS = ['.zip'] archive_filenames = [sibling.rfilename for sibling in info.siblings if get_file_extension(sibling.rfilename, recursive=False, clean=False).extension in SUPPORTED_ARCHIVE_EXTENSIONS] filetypes_from_archives = get_filetypes_from_archives(dataset=dataset, archive_filenames=archive_filenames, hf_token=hf_token) return DatasetFiletypesResponse(filetypes=filetypes + filetypes_from_archives) class DatasetFiletypesJobRunner(DatasetJobRunnerWithDatasetsCache): @staticmethod def get_job_type() -> str: return 'dataset-filetypes' def compute(self) -> CompleteJobResult: return CompleteJobResult(compute_filetypes_response(dataset=self.dataset, hf_endpoint=self.app_config.common.hf_endpoint, hf_token=self.app_config.common.hf_token)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/hub_cache.py import logging from typing import Optional from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_previous_step_or_raise from worker.dtos import CompatibleLibrary, DatasetFormat, 
DatasetHubCacheResponse, DatasetLibrary, DatasetModality, JobResult from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner def compute_hub_cache_response(dataset: str) -> tuple[DatasetHubCacheResponse, float]: logging.info(f"compute 'dataset-hub-cache' for dataset={dataset!r}") preview = False viewer = False progresses: list[Optional[float]] = [] try: is_valid_response = get_previous_step_or_raise(kind='dataset-is-valid', dataset=dataset) content = is_valid_response['content'] if 'preview' not in content or not isinstance(content['preview'], bool) or 'viewer' not in content or (not isinstance(content['viewer'], bool)): raise PreviousStepFormatError("Previous step 'dataset-is-valid' did not return the expected content: 'preview', 'viewer' or 'progress'.") preview = content['preview'] viewer = content['viewer'] progresses.append(is_valid_response['progress']) except PreviousStepFormatError: raise except Exception: logging.info(f"Missing 'dataset-is-valid' response for dataset={dataset!r}. We let the fields empty.") partial = False num_rows: Optional[int] = None try: size_response = get_previous_step_or_raise(kind='dataset-size', dataset=dataset) content = size_response['content'] if 'partial' not in content or not isinstance(content['partial'], bool) or 'size' not in content or ('dataset' not in content['size']) or ('num_rows' not in content['size']['dataset']) or (not isinstance(content['size']['dataset']['num_rows'], int)) or (not (isinstance(content['size']['dataset']['estimated_num_rows'], int) or content['size']['dataset']['estimated_num_rows'] is None)): raise PreviousStepFormatError("Previous step 'dataset-size' did not return the expected content: 'partial' or 'size.dataset.num_rows'.") partial = content['partial'] num_rows = content['size']['dataset']['estimated_num_rows'] or content['size']['dataset']['num_rows'] progresses.append(size_response['progress']) except PreviousStepFormatError: raise except Exception: logging.info(f"Missing 'dataset-size' response for dataset={dataset!r}. We let the fields empty.") libraries: list[DatasetLibrary] = [] formats: list[DatasetFormat] = [] modalities: list[DatasetModality] = [] try: compatible_libraries_response = get_previous_step_or_raise(kind='dataset-compatible-libraries', dataset=dataset) compatible_libraries: list[CompatibleLibrary] = compatible_libraries_response['content']['libraries'] libraries = [compatible_library['library'] for compatible_library in compatible_libraries] formats = compatible_libraries_response['content'].get('formats', []) progresses.append(compatible_libraries_response['progress']) except CachedArtifactNotFoundError: logging.info(f"Missing 'dataset-compatible-libraries' response for dataset={dataset!r}") except KeyError: raise PreviousStepFormatError("Previous step 'dataset-compatible-libraries' did not return the expected content: 'libraries'.") except Exception: logging.info("Error while parsing 'dataset-compatible-libraries' response. 
We let the fields empty.") try: modalities_response = get_previous_step_or_raise(kind='dataset-modalities', dataset=dataset) modalities = modalities_response['content']['modalities'] progresses.append(modalities_response['progress']) except CachedArtifactNotFoundError: logging.info(f"Missing 'dataset-modalities' response for dataset={dataset!r}") except KeyError: raise PreviousStepFormatError("Previous step 'dataset-modalities' did not return the expected content: 'modalities'.") except Exception: logging.info("Error while parsing 'dataset-modalities' response. We let the field empty.") return (DatasetHubCacheResponse(preview=preview, viewer=viewer, partial=partial, num_rows=num_rows, libraries=libraries, formats=formats, modalities=modalities), min([0.0 if p is None else p for p in progresses], default=0.0)) class DatasetHubCacheJobRunner(DatasetJobRunner): @staticmethod def get_job_type() -> str: return 'dataset-hub-cache' def compute(self) -> JobResult: (response_content, progress) = compute_hub_cache_response(dataset=self.dataset) return JobResult(response_content, progress=progress) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/info.py import logging from http import HTTPStatus from typing import Any from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_previous_step_or_raise, get_response from worker.dtos import DatasetInfoResponse, JobResult, PreviousJob from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner def compute_dataset_info_response(dataset: str) -> tuple[DatasetInfoResponse, float]: logging.info(f"compute 'dataset-info' for dataset={dataset!r}") config_names_response = get_previous_step_or_raise(kind='dataset-config-names', dataset=dataset) content = config_names_response['content'] if 'config_names' not in content: raise PreviousStepFormatError("Previous step did not return the expected content: 'config_names'.") try: config_infos: dict[str, Any] = {} total = 0 (pending, failed) = ([], []) partial = False for config_item in content['config_names']: config = config_item['config'] total += 1 try: config_response = get_response(kind='config-info', dataset=dataset, config=config) except CachedArtifactNotFoundError: logging.debug(f"No response found in previous step for dataset={dataset!r} config={config!r}: 'config-info'.") pending.append(PreviousJob(kind='config-info', dataset=dataset, config=config, split=None)) continue if config_response['http_status'] != HTTPStatus.OK: logging.debug(f"Previous step gave an error: {config_response['http_status']}") failed.append(PreviousJob(kind='config-info', dataset=dataset, config=config, split=None)) continue config_infos[config] = config_response['content']['dataset_info'] partial = partial or config_response['content']['partial'] except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e progress = (total - len(pending)) / total if total else 1.0 return (DatasetInfoResponse(dataset_info=config_infos, pending=pending, failed=failed, partial=partial), progress) class DatasetInfoJobRunner(DatasetJobRunner): @staticmethod def get_job_type() -> str: return 'dataset-info' def compute(self) -> JobResult: (response_content, progress) = compute_dataset_info_response(dataset=self.dataset) return JobResult(response_content, progress=progress) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/is_valid.py import logging from http import 
HTTPStatus from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_previous_step_or_raise, get_response from worker.dtos import IsValidResponse, JobResult from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner def compute_is_valid_response(dataset: str) -> tuple[IsValidResponse, float]: logging.info(f"compute 'dataset-is-valid' response for dataset={dataset!r}") preview = False viewer = False search = False filter = False statistics = False try: config_names_response = get_previous_step_or_raise(kind='dataset-config-names', dataset=dataset) except Exception: return (IsValidResponse(preview=preview, viewer=viewer, search=search, filter=filter, statistics=statistics), 0.0) content = config_names_response['content'] if 'config_names' not in content: raise PreviousStepFormatError("Previous step did not return the expected content: 'config_names'.") try: total = 0 pending = 0 for config_item in content['config_names']: config = config_item['config'] total += 1 try: response = get_response(kind='config-is-valid', dataset=dataset, config=config) except CachedArtifactNotFoundError: logging.debug("No response found in previous step for this dataset: 'config-is-valid'.") pending += 1 continue if response['http_status'] != HTTPStatus.OK: logging.debug(f"Previous step gave an error: {response['http_status']}.") continue config_is_valid_content = response['content'] preview = preview or config_is_valid_content['preview'] viewer = viewer or config_is_valid_content['viewer'] search = search or config_is_valid_content['search'] filter = filter or config_is_valid_content['filter'] statistics = statistics or config_is_valid_content['statistics'] except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e progress = (total - pending) / total if total else 1.0 return (IsValidResponse(preview=preview, viewer=viewer, search=search, filter=filter, statistics=statistics), progress) class DatasetIsValidJobRunner(DatasetJobRunner): @staticmethod def get_job_type() -> str: return 'dataset-is-valid' def compute(self) -> JobResult: (response_content, progress) = compute_is_valid_response(dataset=self.dataset) return JobResult(response_content, progress=progress) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/modalities.py import logging from http import HTTPStatus from datasets import Audio, Features, Image, Sequence, Translation, TranslationVariableLanguages, Value from datasets.features.features import FeatureType, _visit from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_previous_step_or_raise, get_response from worker.dtos import CompleteJobResult, DatasetModalitiesResponse, DatasetModality from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner def detect_features_modalities(features: Features) -> set[DatasetModality]: modalities: set[DatasetModality] = set() def classify_modality(feature: FeatureType) -> None: nonlocal modalities if isinstance(feature, Audio): modalities.add('audio') elif isinstance(feature, Image): modalities.add('image') elif isinstance(feature, Value) and feature.dtype in ('string', 'large_string'): modalities.add('text') elif isinstance(feature, (Translation, TranslationVariableLanguages)): modalities.add('text') _visit(features, classify_modality) if not ('audio' in modalities or 'image' in modalities) and len([feature for 
feature in features.values() if isinstance(feature, Value) and ('int' in feature.dtype or 'float' in feature.dtype)]) >= 2: modalities.add('tabular') if any(('emb' not in column_name and (isinstance(feature, Sequence) and feature.feature == Value('float32') or (isinstance(feature, list) and feature[0] == Value('float32'))) for (column_name, feature) in features.items())): modalities.add('timeseries') return modalities def detect_modalities_from_features(dataset: str) -> set[DatasetModality]: dataset_info_response = get_previous_step_or_raise(kind='dataset-info', dataset=dataset) content = dataset_info_response['content'] if 'dataset_info' not in content or not isinstance(content['dataset_info'], dict): raise PreviousStepFormatError("Previous step did not return the expected content: 'dataset_info'.") try: modalities: set[DatasetModality] = set() for config_info in content['dataset_info'].values(): modalities.update(detect_features_modalities(features=Features.from_dict(config_info['features']))) except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e return modalities def detect_modalities_from_url_columns(dataset: str) -> set[DatasetModality]: split_names_response = get_previous_step_or_raise(kind='dataset-split-names', dataset=dataset) content = split_names_response['content'] if 'splits' not in content or (not isinstance(content['splits'], list)): raise PreviousStepFormatError("Previous step did not return the expected content: 'splits'.") try: for split_item in content['splits'][:10]: config = split_item['config'] split = split_item['split'] try: response = get_response(kind='split-image-url-columns', dataset=dataset, config=config, split=split) except CachedArtifactNotFoundError: logging.debug("No response found in previous step for this dataset: 'split-image-url-columns'.") continue if response['http_status'] != HTTPStatus.OK: logging.debug(f"Previous step gave an error: {response['http_status']}.") continue else: try: if response['content']['columns']: return {'image'} except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e return set() IMAGE_EXTENSIONS = {'.apng', '.avif', '.gif', '.jpg', '.jpeg', '.jfif', '.pjpeg', '.pjp', '.png', '.svg', '.webp', '.bmp', '.ico', '.cur', '.tif', '.tiff'} AUDIO_EXTENSIONS = {'.aac', '.flac', '.mp3', '.m4a', '.oga', '.wav', '.weba', '.opus', '.spx', '.wma', '.aiff', '.ape', '.mka', '.wv', '.tak'} AUDIO_BUT_COULD_ALSO_BE_VIDEO_EXTENSIONS = {'.ogg'} VIDEO_EXTENSIONS = {'.m4v', '.m4p', '.ogv', '.mov', '.mkv', '.avi', '.wmv', '.flv'} VIDEO_BUT_COULD_ALSO_BE_AUDIO_EXTENSIONS = {'.3gp', '.mpg', '.mpeg', '.mp4', '.webm'} GEOSPATIAL_EXTENSIONS = {'.shp', '.shx', '.dbf', '.prj', '.cpg', '.kml', '.kmz', '.gpx', '.geojson', '.topojson', '.gml', '.geoparquet', '.fgb', '.img', '.bil', '.bip', '.bsq', '.gpkg', '.mbtiles', '.pmtiles'} _3D_EXTENSIONS = {'.fbx', '.dae', '.dxf', '.obj', '.stl', '.ply', '.gltf', '.glb', '.usdz'} TEXT_EXTENSIONS = {'.txt'} MULTI_ROWS_EXTENSIONS = {'.parquet', '.csv', '.json', '.jsonl', '.arrow'} ALL_EXTENSIONS = IMAGE_EXTENSIONS | AUDIO_EXTENSIONS | AUDIO_BUT_COULD_ALSO_BE_VIDEO_EXTENSIONS | VIDEO_EXTENSIONS | VIDEO_BUT_COULD_ALSO_BE_AUDIO_EXTENSIONS | GEOSPATIAL_EXTENSIONS | _3D_EXTENSIONS | TEXT_EXTENSIONS | MULTI_ROWS_EXTENSIONS
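# Illustrative aside (not part of the original module): a minimal, hypothetical helper showing how the
# extension sets above map a single filename to a modality. The real detection in
# detect_modalities_from_filetypes below works on aggregated per-extension counts with thresholds instead.
from os.path import splitext
from typing import Optional

def _guess_modality_for_filename(filename: str) -> Optional[DatasetModality]:
    extension = splitext(filename)[1].lower()
    if extension in IMAGE_EXTENSIONS:
        return 'image'
    if extension in AUDIO_EXTENSIONS | AUDIO_BUT_COULD_ALSO_BE_VIDEO_EXTENSIONS:
        return 'audio'
    if extension in VIDEO_EXTENSIONS | VIDEO_BUT_COULD_ALSO_BE_AUDIO_EXTENSIONS:
        return 'video'
    if extension in GEOSPATIAL_EXTENSIONS:
        return 'geospatial'
    if extension in _3D_EXTENSIONS:
        return '3d'
    if extension in TEXT_EXTENSIONS:
        return 'text'
    return None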
set[DatasetModality]: dataset_filetypes_response = get_previous_step_or_raise(kind='dataset-filetypes', dataset=dataset) content = dataset_filetypes_response['content'] if 'filetypes' not in content or not isinstance(content['filetypes'], list): raise PreviousStepFormatError("Previous step did not return the expected content: 'filetypes'.") try: modalities: set[DatasetModality] = set() total_count = sum((filetype['count'] for filetype in content['filetypes'] if filetype['extension'] in ALL_EXTENSIONS)) has_multi_rows_files = any((filetype['count'] for filetype in content['filetypes'] if filetype['extension'] in MULTI_ROWS_EXTENSIONS)) min_count = round(0.1 * total_count) min_count_for_image = 10 if has_multi_rows_files else 1 for filetype in content['filetypes']: if filetype['count'] < min_count: continue elif filetype['extension'] in IMAGE_EXTENSIONS and filetype['count'] < min_count_for_image: continue if filetype['extension'] in IMAGE_EXTENSIONS: modalities.add('image') elif filetype['extension'] in AUDIO_EXTENSIONS | AUDIO_BUT_COULD_ALSO_BE_VIDEO_EXTENSIONS: modalities.add('audio') elif filetype['extension'] in VIDEO_EXTENSIONS | VIDEO_BUT_COULD_ALSO_BE_AUDIO_EXTENSIONS: modalities.add('video') elif filetype['extension'] in GEOSPATIAL_EXTENSIONS: modalities.add('geospatial') elif filetype['extension'] in _3D_EXTENSIONS: modalities.add('3d') elif filetype['extension'] in TEXT_EXTENSIONS: modalities.add('text') except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e return modalities def compute_modalities_response(dataset: str) -> DatasetModalitiesResponse: logging.info(f"compute 'dataset-modalities' for dataset={dataset!r}") modalities: set[DatasetModality] = set() try: modalities.update(detect_modalities_from_features(dataset)) except PreviousStepFormatError: raise except Exception: logging.info(f'failed to detect modalities from features of dataset={dataset!r}') pass try: modalities.update(detect_modalities_from_url_columns(dataset)) except PreviousStepFormatError: raise except Exception: logging.info(f'failed to detect modalities from URL columns of dataset={dataset!r}') pass try: modalities.update(detect_modalities_from_filetypes(dataset)) except PreviousStepFormatError: raise except Exception: logging.info(f'failed to detect modalities from file types of dataset={dataset!r}') pass return DatasetModalitiesResponse({'modalities': sorted(modalities)}) class DatasetModalitiesJobRunner(DatasetJobRunner): @staticmethod def get_job_type() -> str: return 'dataset-modalities' def compute(self) -> CompleteJobResult: response_content = compute_modalities_response(dataset=self.dataset) return CompleteJobResult(response_content) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/opt_in_out_urls_count.py import logging from http import HTTPStatus from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_previous_step_or_raise, get_response from worker.dtos import JobResult, OptInOutUrlsCountResponse from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner def compute_opt_in_out_urls_count_response(dataset: str) -> tuple[OptInOutUrlsCountResponse, float]: logging.info(f"compute 'dataset-opt-in-out-urls-count' for dataset={dataset!r}") config_names_response = get_previous_step_or_raise(kind='dataset-config-names', dataset=dataset) content = config_names_response['content'] if 'config_names' not in content: raise 
PreviousStepFormatError("Previous step did not return the expected content: 'config_names'.") urls_columns = [] num_opt_in_urls = 0 num_opt_out_urls = 0 num_urls = 0 num_scanned_rows = 0 full_scan_count = 0 try: total = 0 pending = 0 for config_item in content['config_names']: config = config_item['config'] total += 1 try: response = get_response(kind='config-opt-in-out-urls-count', dataset=dataset, config=config) except CachedArtifactNotFoundError: logging.debug("No response found in previous step for this dataset: 'config-opt-in-out-urls-count'.") pending += 1 continue if response['http_status'] != HTTPStatus.OK: logging.debug(f"Previous step gave an error: {response['http_status']}.") continue elif response['progress'] and response['progress'] < 1.0: logging.debug(f"Previous step is still in progress: {response['progress']}.") pending += 1 continue split_opt_in_out_content = response['content'] urls_columns.extend(split_opt_in_out_content['urls_columns']) num_opt_in_urls += split_opt_in_out_content['num_opt_in_urls'] num_opt_out_urls += split_opt_in_out_content['num_opt_out_urls'] num_urls += split_opt_in_out_content['num_urls'] num_scanned_rows += split_opt_in_out_content['num_scanned_rows'] full_scan_count += 1 if split_opt_in_out_content['full_scan'] else 0 except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e unique_urls_columns = sorted(list(set(urls_columns))) has_urls_columns = len(unique_urls_columns) > 0 progress = (total - pending) / total if total else 1.0 full_scan = full_scan_count == total return (OptInOutUrlsCountResponse(urls_columns=unique_urls_columns, has_urls_columns=has_urls_columns, num_opt_in_urls=num_opt_in_urls, num_opt_out_urls=num_opt_out_urls, num_scanned_rows=num_scanned_rows, num_urls=num_urls, full_scan=full_scan), progress) class DatasetOptInOutUrlsCountJobRunner(DatasetJobRunner): @staticmethod def get_job_type() -> str: return 'dataset-opt-in-out-urls-count' def compute(self) -> JobResult: (response_content, progress) = compute_opt_in_out_urls_count_response(dataset=self.dataset) return JobResult(response_content, progress=progress) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/parquet.py import logging from http import HTTPStatus from libcommon.dtos import SplitHubFile from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_previous_step_or_raise, get_response from worker.dtos import ConfigParquetResponse, DatasetParquetResponse, JobResult, PreviousJob from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner def compute_parquet_response(dataset: str) -> tuple[DatasetParquetResponse, float]: logging.info(f"compute 'dataset-parquet' for dataset={dataset!r}") config_names_response = get_previous_step_or_raise(kind='dataset-config-names', dataset=dataset) content = config_names_response['content'] if 'config_names' not in content: raise PreviousStepFormatError("Previous step did not return the expected content: 'config_names'.") try: parquet_files: list[SplitHubFile] = [] total = 0 pending = [] failed = [] partial = False for config_item in content['config_names']: config = config_item['config'] total += 1 try: response = get_response(kind='config-parquet', dataset=dataset, config=config) except CachedArtifactNotFoundError: logging.debug("No response found in previous step for this dataset: 'config-parquet' endpoint.") pending.append(PreviousJob({'kind': 'config-parquet', 
'dataset': dataset, 'config': config, 'split': None})) continue if response['http_status'] != HTTPStatus.OK: logging.debug(f"Previous step gave an error: {response['http_status']}.") failed.append(PreviousJob({'kind': 'config-parquet', 'dataset': dataset, 'config': config, 'split': None})) continue config_parquet_content = ConfigParquetResponse(parquet_files=response['content']['parquet_files'], partial=response['content']['partial'], features=None) parquet_files.extend(config_parquet_content['parquet_files']) partial = partial or config_parquet_content['partial'] except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e progress = (total - len(pending)) / total if total else 1.0 return (DatasetParquetResponse(parquet_files=parquet_files, pending=pending, failed=failed, partial=partial), progress) class DatasetParquetJobRunner(DatasetJobRunner): @staticmethod def get_job_type() -> str: return 'dataset-parquet' def compute(self) -> JobResult: (response_content, progress) = compute_parquet_response(dataset=self.dataset) return JobResult(response_content, progress=progress) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/presidio_entities_count.py import logging from http import HTTPStatus from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_previous_step_or_raise, get_response from worker.dtos import JobResult, PresidioEntitiesCountResponse from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner def compute_presidio_entities_count_response(dataset: str) -> tuple[PresidioEntitiesCountResponse, float]: logging.info(f"compute 'dataset-presidio-entities-count' for dataset={dataset!r}") split_names_response = get_previous_step_or_raise(kind='dataset-split-names', dataset=dataset) content = split_names_response['content'] if 'splits' not in content: raise PreviousStepFormatError("Previous step did not return the expected content: 'splits'.") scanned_columns = set() presidio_entities_count_response = PresidioEntitiesCountResponse({'scanned_columns': [], 'num_rows_with_person_entities': 0, 'num_rows_with_phone_number_entities': 0, 'num_rows_with_email_address_entities': 0, 'num_rows_with_sensitive_pii': 0, 'num_scanned_rows': 0, 'has_scanned_columns': False, 'full_scan': True}) try: total = 0 pending = 0 for split_item in content['splits']: config = split_item['config'] split = split_item['split'] total += 1 try: response = get_response(kind='split-presidio-scan', dataset=dataset, config=config, split=split) except CachedArtifactNotFoundError: logging.debug("No response found in previous step for this dataset: 'split-presidio-scan'.") pending += 1 continue if response['http_status'] != HTTPStatus.OK: logging.debug(f"Previous step gave an error: {response['http_status']}.") continue split_presidio_scan_content = response['content'] scanned_columns.update(split_presidio_scan_content['scanned_columns']) if not split_presidio_scan_content['full_scan']: presidio_entities_count_response['full_scan'] = False presidio_entities_count_response['num_rows_with_person_entities'] += split_presidio_scan_content['num_rows_with_person_entities'] presidio_entities_count_response['num_rows_with_phone_number_entities'] += split_presidio_scan_content['num_rows_with_phone_number_entities'] presidio_entities_count_response['num_rows_with_email_address_entities'] += split_presidio_scan_content['num_rows_with_email_address_entities'] 
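# --- Illustrative sketch (not part of the worker code) ---------------------------------
# The dataset-level Presidio counter above folds each cached 'split-presidio-scan' entry
# into one dict and reports progress as the share of splits whose scan is already cached.
# The standalone snippet below mimics only that aggregation with plain dicts; the field
# names follow the response shape shown here, while the function name and the idea of
# passing None for a pending split are made up for illustration.
def _aggregate_presidio_counts(split_scans: list[dict | None]) -> tuple[dict, float]:
    """split_scans: one dict per split, or None when the cache entry is still pending."""
    totals = {
        "num_rows_with_person_entities": 0,
        "num_rows_with_phone_number_entities": 0,
        "num_rows_with_email_address_entities": 0,
        "num_rows_with_sensitive_pii": 0,
        "num_scanned_rows": 0,
        "full_scan": True,
    }
    pending = 0
    for scan in split_scans:
        if scan is None:  # no cached response yet for this split
            pending += 1
            continue
        for key in ("num_rows_with_person_entities", "num_rows_with_phone_number_entities",
                    "num_rows_with_email_address_entities", "num_scanned_rows"):
            totals[key] += scan[key]
        # several entity kinds are folded into a single "sensitive PII" counter
        totals["num_rows_with_sensitive_pii"] += (
            scan["num_rows_with_credit_card_entities"]
            + scan["num_rows_with_us_ssn_entities"]
            + scan["num_rows_with_us_passport_entities"]
            + scan["num_rows_with_iban_code_entities"]
        )
        totals["full_scan"] = totals["full_scan"] and scan["full_scan"]
    progress = (len(split_scans) - pending) / len(split_scans) if split_scans else 1.0
    return totals, progress
# e.g. _aggregate_presidio_counts([None, {...}]) -> (totals, 0.5)
# ----------------------------------------------------------------------------------------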
presidio_entities_count_response['num_rows_with_sensitive_pii'] += split_presidio_scan_content['num_rows_with_credit_card_entities'] presidio_entities_count_response['num_rows_with_sensitive_pii'] += split_presidio_scan_content['num_rows_with_us_ssn_entities'] presidio_entities_count_response['num_rows_with_sensitive_pii'] += split_presidio_scan_content['num_rows_with_us_passport_entities'] presidio_entities_count_response['num_rows_with_sensitive_pii'] += split_presidio_scan_content['num_rows_with_iban_code_entities'] presidio_entities_count_response['num_scanned_rows'] += split_presidio_scan_content['num_scanned_rows'] except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e presidio_entities_count_response['scanned_columns'] = sorted(scanned_columns) presidio_entities_count_response['has_scanned_columns'] = len(presidio_entities_count_response['scanned_columns']) > 0 progress = (total - pending) / total if total else 1.0 return (presidio_entities_count_response, progress) class DatasetPresidioEntitiesCountJobRunner(DatasetJobRunner): @staticmethod def get_job_type() -> str: return 'dataset-presidio-entities-count' def compute(self) -> JobResult: (response_content, progress) = compute_presidio_entities_count_response(dataset=self.dataset) return JobResult(response_content, progress=progress) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/size.py import logging from http import HTTPStatus from typing import Optional from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_previous_step_or_raise, get_response from worker.dtos import ConfigSize, ConfigSizeResponse, DatasetSize, DatasetSizeResponse, JobResult, PreviousJob, SplitSize from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner def compute_sizes_response(dataset: str) -> tuple[DatasetSizeResponse, float]: logging.info(f"compute 'dataset-size' for dataset={dataset!r}") config_names_response = get_previous_step_or_raise(kind='dataset-config-names', dataset=dataset) content = config_names_response['content'] if 'config_names' not in content: raise PreviousStepFormatError("Previous step did not return the expected content: 'config_names'.") try: split_sizes: list[SplitSize] = [] config_sizes: list[ConfigSize] = [] total = 0 pending = [] failed = [] partial = False for config_item in content['config_names']: config = config_item['config'] total += 1 try: response = get_response(kind='config-size', dataset=dataset, config=config) except CachedArtifactNotFoundError: logging.debug("No response found in previous step for this dataset: 'config-size' endpoint.") pending.append(PreviousJob({'kind': 'config-size', 'dataset': dataset, 'config': config, 'split': None})) continue if response['http_status'] != HTTPStatus.OK: logging.debug(f"Previous step gave an error: {response['http_status']}.") failed.append(PreviousJob({'kind': 'config-size', 'dataset': dataset, 'config': config, 'split': None})) continue config_size_content = ConfigSizeResponse(size=response['content']['size'], partial=response['content']['partial']) config_sizes.append(config_size_content['size']['config']) split_sizes.extend(config_size_content['size']['splits']) partial = partial or config_size_content['partial'] num_bytes_original_files: Optional[int] = 0 for config_size in config_sizes: if num_bytes_original_files is not None and isinstance(config_size['num_bytes_original_files'], int): 
num_bytes_original_files += config_size['num_bytes_original_files'] else: num_bytes_original_files = None break dataset_size: DatasetSize = {'dataset': dataset, 'num_bytes_original_files': num_bytes_original_files, 'num_bytes_parquet_files': sum((config_size['num_bytes_parquet_files'] for config_size in config_sizes)), 'num_bytes_memory': sum((config_size['num_bytes_memory'] for config_size in config_sizes)), 'num_rows': sum((config_size['num_rows'] for config_size in config_sizes)), 'estimated_num_rows': sum((config_size['estimated_num_rows'] or config_size['num_rows'] for config_size in config_sizes)) if any((config_size['estimated_num_rows'] for config_size in config_sizes)) else None} except Exception as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e progress = (total - len(pending)) / total if total else 1.0 return (DatasetSizeResponse({'size': {'dataset': dataset_size, 'configs': config_sizes, 'splits': split_sizes}, 'pending': pending, 'failed': failed, 'partial': partial}), progress) class DatasetSizeJobRunner(DatasetJobRunner): @staticmethod def get_job_type() -> str: return 'dataset-size' def compute(self) -> JobResult: (response_content, progress) = compute_sizes_response(dataset=self.dataset) return JobResult(response_content, progress=progress) # File: dataset-viewer-main/services/worker/src/worker/job_runners/dataset/split_names.py import logging from http import HTTPStatus from libcommon.constants import CONFIG_SPLIT_NAMES_KIND from libcommon.dtos import FullConfigItem, FullSplitItem from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import CachedArtifactNotFoundError, get_previous_step_or_raise, get_response from worker.dtos import DatasetSplitNamesResponse, FailedConfigItem, JobResult from worker.job_runners.dataset.dataset_job_runner import DatasetJobRunner def compute_dataset_split_names_response(dataset: str) -> tuple[DatasetSplitNamesResponse, float]: logging.info(f"compute 'dataset-split-names' for dataset={dataset!r}") config_names_response = get_previous_step_or_raise(kind='dataset-config-names', dataset=dataset) content = config_names_response['content'] if 'config_names' not in content: raise PreviousStepFormatError("'dataset-config-names' did not return the expected content: 'config_names'.") config_names = [config_name_item['config'] for config_name_item in content['config_names']] if any((not isinstance(config_name, str) for config_name in config_names)): raise PreviousStepFormatError("Previous step 'dataset-config-names' did not return a list of config names.") try: splits: list[FullSplitItem] = [] pending: list[FullConfigItem] = [] failed: list[FailedConfigItem] = [] total = 0 for config in config_names: total += 1 try: response = get_response(CONFIG_SPLIT_NAMES_KIND, dataset=dataset, config=config) except CachedArtifactNotFoundError: logging.debug(f"No response (successful or erroneous) found in cache for the previous step '{CONFIG_SPLIT_NAMES_KIND}' for this dataset.") pending.append(FullConfigItem({'dataset': dataset, 'config': config})) continue if response['http_status'] != HTTPStatus.OK: logging.debug(f'No successful response found in the previous step {CONFIG_SPLIT_NAMES_KIND}.') failed.append(FailedConfigItem({'dataset': dataset, 'config': config, 'error': response['content']})) continue splits.extend([FullSplitItem({'dataset': dataset, 'config': config, 'split': split_content['split']}) for split_content in response['content']['splits']]) except Exception as e: raise 
PreviousStepFormatError('Previous step did not return the expected content.', e) from e progress = (total - len(pending)) / total if total else 1.0 return (DatasetSplitNamesResponse({'splits': splits, 'pending': pending, 'failed': failed}), progress) class DatasetSplitNamesJobRunner(DatasetJobRunner): @staticmethod def get_job_type() -> str: return 'dataset-split-names' def compute(self) -> JobResult: (response_content, progress) = compute_dataset_split_names_response(dataset=self.dataset) return JobResult(response_content, progress=progress) # File: dataset-viewer-main/services/worker/src/worker/job_runners/split/descriptive_statistics.py import logging from collections import Counter from pathlib import Path from typing import Any, Optional, TypedDict, Union import polars as pl import pyarrow.parquet as pq from datasets import Features, Sequence from datasets.features.features import FeatureType, _ArrayXD from libcommon.dtos import JobInfo from libcommon.exceptions import CacheDirectoryNotInitializedError, FeaturesResponseEmptyError, NoSupportedFeaturesError, ParquetResponseEmptyError, PolarsParquetReadError, PreviousStepFormatError from libcommon.parquet_utils import extract_split_directory_from_parquet_url, get_num_parquet_files_to_process, is_list_pa_type, parquet_export_is_partial from libcommon.simple_cache import get_previous_step_or_raise from libcommon.storage import StrPath from libcommon.utils import download_file_from_hub from worker.config import AppConfig, DescriptiveStatisticsConfig from worker.dtos import CompleteJobResult from worker.job_runners.split.split_job_runner import SplitJobRunnerWithCache from worker.statistics_utils import FLOAT_DTYPES, INTEGER_DTYPES, NUMERICAL_DTYPES, STRING_DTYPES, AudioColumn, BoolColumn, ClassLabelColumn, FloatColumn, ImageColumn, IntColumn, ListColumn, StatisticsPerColumnItem, StringColumn REPO_TYPE = 'dataset' class SplitDescriptiveStatisticsResponse(TypedDict): num_examples: int statistics: list[StatisticsPerColumnItem] partial: bool SupportedColumns = Union[ClassLabelColumn, IntColumn, FloatColumn, StringColumn, BoolColumn, ListColumn, AudioColumn, ImageColumn] def is_extension_feature(feature: FeatureType) -> bool: if isinstance(feature, dict): return any((is_extension_feature(f) for f in feature.values())) elif isinstance(feature, (list, tuple)): return any((is_extension_feature(f) for f in feature)) elif isinstance(feature, Sequence): return is_extension_feature(feature.feature) else: return isinstance(feature, _ArrayXD) def get_extension_features(features: dict[str, Any]) -> set[str]: features = Features.from_dict(features) return {feature_name for (feature_name, feature) in features.items() if is_extension_feature(feature)} def compute_descriptive_statistics_response(dataset: str, config: str, split: str, local_parquet_directory: Path, hf_token: Optional[str], parquet_revision: str, max_split_size_bytes: int, parquet_metadata_directory: StrPath) -> SplitDescriptiveStatisticsResponse: logging.info(f"compute 'split-descriptive-statistics' for dataset={dataset!r} config={config!r} split={split!r}") config_parquet_metadata_step = 'config-parquet-metadata' parquet_metadata_response = get_previous_step_or_raise(kind=config_parquet_metadata_step, dataset=dataset, config=config) content_parquet_metadata = parquet_metadata_response['content'] try: split_parquet_files = [parquet_file for parquet_file in content_parquet_metadata['parquet_files_metadata'] if parquet_file['config'] == config and parquet_file['split'] == split] features = 
content_parquet_metadata['features'] except KeyError as e: raise PreviousStepFormatError(f"Previous step '{config_parquet_metadata_step}' did not return the expected content", e) from e if not split_parquet_files: raise ParquetResponseEmptyError('No parquet files found.') if not features: raise FeaturesResponseEmptyError('No features found.') (num_parquet_files_to_process, num_bytes, num_rows) = get_num_parquet_files_to_process(parquet_files=split_parquet_files, parquet_metadata_directory=parquet_metadata_directory, max_size_bytes=max_split_size_bytes) partial_parquet_export = parquet_export_is_partial(split_parquet_files[0]['url']) partial = partial_parquet_export or num_parquet_files_to_process < len(split_parquet_files) split_parquet_files = split_parquet_files[:num_parquet_files_to_process] logging.info(f'Downloading remote parquet files to a local directory {local_parquet_directory}. ') split_directory = extract_split_directory_from_parquet_url(split_parquet_files[0]['url']) for parquet_file in split_parquet_files: download_file_from_hub(repo_type=REPO_TYPE, revision=parquet_revision, repo_id=dataset, filename=f"{config}/{split_directory}/{parquet_file['filename']}", local_dir=local_parquet_directory, hf_token=hf_token, cache_dir=local_parquet_directory, force_download=True, resume_download=False) local_parquet_split_directory = Path(local_parquet_directory) / config / split_directory pq_split_dataset = pq.ParquetDataset(local_parquet_split_directory) num_examples = sum((fragment.metadata.num_rows for fragment in pq_split_dataset.fragments)) split_extension_features = get_extension_features(features) features = {feature_name: feature for (feature_name, feature) in features.items() if feature_name not in split_extension_features} def _column_from_feature(dataset_feature_name: str, dataset_feature: Union[dict[str, Any], list[Any]]) -> Optional[SupportedColumns]: if isinstance(dataset_feature, list) or (isinstance(dataset_feature, dict) and dataset_feature.get('_type') == 'Sequence'): if is_list_pa_type(local_parquet_split_directory / split_parquet_files[0]['filename'], dataset_feature_name): return ListColumn(feature_name=dataset_feature_name, n_samples=num_examples) if isinstance(dataset_feature, dict): if dataset_feature.get('_type') == 'ClassLabel': return ClassLabelColumn(feature_name=dataset_feature_name, n_samples=num_examples, feature_dict=dataset_feature) if dataset_feature.get('_type') == 'Audio': return AudioColumn(feature_name=dataset_feature_name, n_samples=num_examples) if dataset_feature.get('_type') == 'Image': return ImageColumn(feature_name=dataset_feature_name, n_samples=num_examples) if dataset_feature.get('_type') == 'Value': if dataset_feature.get('dtype') in INTEGER_DTYPES: return IntColumn(feature_name=dataset_feature_name, n_samples=num_examples) if dataset_feature.get('dtype') in FLOAT_DTYPES: return FloatColumn(feature_name=dataset_feature_name, n_samples=num_examples) if dataset_feature.get('dtype') in STRING_DTYPES: return StringColumn(feature_name=dataset_feature_name, n_samples=num_examples) if dataset_feature.get('dtype') == 'bool': return BoolColumn(feature_name=dataset_feature_name, n_samples=num_examples) return None columns: list[SupportedColumns] = [] all_stats: list[StatisticsPerColumnItem] = [] for (feature_name, feature) in features.items(): if (column := _column_from_feature(feature_name, feature)) is not None: columns.append(column) if not columns: raise NoSupportedFeaturesError(f'No columns for statistics computation found. 
Currently supported feature types are: {NUMERICAL_DTYPES}, {STRING_DTYPES}, ClassLabel, list/Sequence and bool. ') column_names_str = ', '.join([column.name for column in columns]) column_counts = Counter([column.__class__.__name__ for column in columns]) logging.info(f'Computing statistics for {len(columns)} columns: {column_names_str},\nColumn types counts: {column_counts}. ') for column in columns: if isinstance(column, AudioColumn) or isinstance(column, ImageColumn): column_stats = column.compute_and_prepare_response(local_parquet_split_directory) else: try: if split_extension_features: data = pl.DataFrame._from_arrow(pq.read_table(local_parquet_split_directory, columns=[column.name])) else: data = pl.read_parquet(local_parquet_split_directory / '*.parquet', columns=[column.name]) except Exception as error: raise PolarsParquetReadError(f'Error reading parquet file(s) at local_parquet_split_directory={local_parquet_split_directory!r}, columns=[{column.name}]: {error}', error) column_stats = column.compute_and_prepare_response(data) all_stats.append(column_stats) if not all_stats: raise NoSupportedFeaturesError(f'No columns for statistics computation found. Currently supported feature types are: {NUMERICAL_DTYPES}, {STRING_DTYPES}, ClassLabel, list/Sequence and bool. ') logging.info(f'Computing for dataset={dataset!r} config={config!r} split={split!r} finished. {len(all_stats)} columns processed. ') return SplitDescriptiveStatisticsResponse(num_examples=num_examples, statistics=sorted(all_stats, key=lambda x: x['column_name']), partial=partial) class SplitDescriptiveStatisticsJobRunner(SplitJobRunnerWithCache): descriptive_statistics_config: DescriptiveStatisticsConfig def __init__(self, job_info: JobInfo, app_config: AppConfig, statistics_cache_directory: StrPath, parquet_metadata_directory: StrPath): super().__init__(job_info=job_info, app_config=app_config, cache_directory=Path(statistics_cache_directory)) self.descriptive_statistics_config = app_config.descriptive_statistics self.parquet_metadata_directory = parquet_metadata_directory @staticmethod def get_job_type() -> str: return 'split-descriptive-statistics' def compute(self) -> CompleteJobResult: if self.cache_subdirectory is None: raise CacheDirectoryNotInitializedError('Cache directory has not been initialized.') return CompleteJobResult(compute_descriptive_statistics_response(dataset=self.dataset, config=self.config, split=self.split, local_parquet_directory=self.cache_subdirectory, hf_token=self.app_config.common.hf_token, parquet_revision=self.descriptive_statistics_config.parquet_revision, max_split_size_bytes=self.descriptive_statistics_config.max_split_size_bytes, parquet_metadata_directory=self.parquet_metadata_directory)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/split/duckdb_index.py import copy import logging import re from pathlib import Path from typing import Any, Literal, Optional import duckdb import polars as pl from datasets.features.features import Features, FeatureType, Translation, TranslationVariableLanguages, Value, _visit from huggingface_hub._commit_api import CommitOperation, CommitOperationAdd, CommitOperationDelete from huggingface_hub.hf_api import HfApi from huggingface_hub.repocard_data import DatasetCardData from huggingface_hub.utils._errors import HfHubHTTPError, RepositoryNotFoundError from libcommon.constants import DUCKDB_INDEX_JOB_RUNNER_SUBDIRECTORY, ROW_IDX_COLUMN from libcommon.dtos import JobInfo from libcommon.exceptions import 
CacheDirectoryNotInitializedError, CreateCommitError, DatasetNotFoundError, DuckDBIndexFileNotFoundError, LockedDatasetTimeoutError, ParquetResponseEmptyError, PreviousStepFormatError from libcommon.parquet_utils import extract_split_directory_from_parquet_url, get_num_parquet_files_to_process, is_list_pa_type, parquet_export_is_partial from libcommon.queue.lock import lock from libcommon.simple_cache import get_previous_step_or_raise from libcommon.storage import StrPath from libcommon.utils import HF_HUB_HTTP_ERROR_RETRY_SLEEPS, download_file_from_hub, retry from worker.config import AppConfig, DuckDbIndexConfig from worker.dtos import CompleteJobResult, SplitDuckdbIndex from worker.job_runners.split.split_job_runner import SplitJobRunnerWithCache from worker.statistics_utils import STRING_DTYPES, AudioColumn, ImageColumn, ListColumn, StringColumn from worker.utils import LOCK_GIT_BRANCH_RETRY_SLEEPS, create_branch, get_split_names, hf_hub_url DATASET_TYPE = 'dataset' DEFAULT_STEMMER = 'none' DUCKDB_DEFAULT_INDEX_FILENAME = 'index.duckdb' DUCKDB_DEFAULT_PARTIAL_INDEX_FILENAME = 'partial-index.duckdb' CREATE_INDEX_COMMAND = f"PRAGMA create_fts_index('data', '{ROW_IDX_COLUMN}', {{columns}}, stemmer='{{stemmer}}', overwrite=1);" CREATE_TABLE_COMMAND = "CREATE OR REPLACE TABLE data AS SELECT {columns} FROM '{source}';" CREATE_TABLE_JOIN_WITH_TRANSFORMED_DATA_COMMAND = "\n CREATE OR REPLACE TABLE data AS \n SELECT {columns}, transformed_df.* FROM '{source}' \n POSITIONAL JOIN transformed_df;\n" CREATE_SEQUENCE_COMMAND = 'CREATE OR REPLACE SEQUENCE serial START 0 MINVALUE 0;' ALTER_TABLE_BY_ADDING_SEQUENCE_COLUMN = f"ALTER TABLE data ADD COLUMN {ROW_IDX_COLUMN} BIGINT DEFAULT nextval('serial');" CREATE_INDEX_ID_COLUMN_COMMANDS = CREATE_SEQUENCE_COMMAND + ALTER_TABLE_BY_ADDING_SEQUENCE_COLUMN INSTALL_AND_LOAD_EXTENSION_COMMAND = "INSTALL 'fts'; LOAD 'fts';" SET_EXTENSIONS_DIRECTORY_COMMAND = "SET extension_directory='{directory}';" REPO_TYPE = 'dataset' STEMMER_MAPPING = {'arabic': ['ar', 'ara'], 'basque': ['eu', 'eus'], 'catalan': ['ca', 'cat'], 'danish': ['da', 'dan'], 'dutch': ['nl', 'nld'], 'english': ['en', 'eng'], 'finnish': ['fi', 'fin'], 'french': ['fr', 'fra'], 'german': ['de', 'deu'], 'greek': ['el', 'ell'], 'hindi': ['hi', 'hin'], 'hungarian': ['hu', 'hun'], 'indonesian': ['id', 'ind'], 'irish': ['ga', 'gle'], 'italian': ['it', 'ita'], 'lithuanian': ['lt', 'lit'], 'nepali': ['ne', 'nep'], 'norwegian': ['no', 'nor'], 'portuguese': ['pt', 'por'], 'romanian': ['ro', 'ron'], 'russian': ['ru', 'rus'], 'serbian': ['sr', 'srp'], 'spanish': ['es', 'spa'], 'swedish': ['sv', 'swe'], 'tamil': ['ta', 'tam'], 'turkish': ['tr', 'tur']} LengthDtype = Literal['string', 'list'] def get_indexable_columns(features: Features) -> list[str]: indexable_columns: list[str] = [] for (column, feature) in features.items(): indexable = False def check_indexable(feature: FeatureType) -> None: nonlocal indexable if isinstance(feature, Value) and feature.dtype in STRING_DTYPES: indexable = True elif isinstance(feature, (Translation, TranslationVariableLanguages)): indexable = True _visit(feature, check_indexable) if indexable: indexable_columns.append(column) return indexable_columns def get_monolingual_stemmer(card_data: DatasetCardData) -> str: if card_data is None: return DEFAULT_STEMMER all_languages = card_data['language'] if isinstance(all_languages, list) and len(all_languages) == 1: first_language = all_languages[0] elif isinstance(all_languages, str): first_language = all_languages else: return 
DEFAULT_STEMMER return next((language for (language, codes) in STEMMER_MAPPING.items() if first_language in codes), DEFAULT_STEMMER) def compute_length_column(parquet_directory: Path, column_name: str, target_df: Optional[pl.DataFrame], dtype: LengthDtype) -> pl.DataFrame: column_class = ListColumn if dtype == 'list' else StringColumn df = pl.read_parquet(str(parquet_directory / '*.parquet'), columns=[column_name]) lengths_column_name = f'{column_name}.length' lengths_df: pl.DataFrame = column_class.compute_transformed_data(df, column_name, transformed_column_name=lengths_column_name) if target_df is None: return lengths_df.select(pl.col(lengths_column_name)) target_df.insert_column(target_df.shape[1], lengths_df[lengths_column_name]) return target_df def compute_audio_duration_column(parquet_directory: Path, column_name: str, target_df: Optional[pl.DataFrame]) -> pl.DataFrame: duration_column_name = f'{column_name}.duration' durations = AudioColumn.compute_transformed_data(parquet_directory, column_name, AudioColumn.get_duration) duration_df = pl.from_dict({duration_column_name: durations}) if target_df is None: return duration_df target_df.insert_column(target_df.shape[1], duration_df[duration_column_name]) return target_df def compute_image_width_height_column(parquet_directory: Path, column_name: str, target_df: Optional[pl.DataFrame]) -> pl.DataFrame: shapes = ImageColumn.compute_transformed_data(parquet_directory, column_name, ImageColumn.get_shape) (widths, heights) = list(zip(*shapes)) (width_column_name, height_column_name) = (f'{column_name}.width', f'{column_name}.height') shapes_df = pl.from_dict({width_column_name: widths, height_column_name: heights}) if target_df is None: return shapes_df target_df.insert_column(target_df.shape[1], shapes_df[width_column_name]) target_df.insert_column(target_df.shape[1], shapes_df[height_column_name]) return target_df def compute_transformed_data(parquet_directory: Path, features: dict[str, Any]) -> Optional[pl.DataFrame]: transformed_df = None for (feature_name, feature) in features.items(): if isinstance(feature, list) or (isinstance(feature, dict) and feature.get('_type') == 'Sequence'): first_parquet_file = list(parquet_directory.glob('*.parquet'))[0] if is_list_pa_type(first_parquet_file, feature_name): transformed_df = compute_length_column(parquet_directory, feature_name, transformed_df, dtype='list') elif isinstance(feature, dict): if feature.get('_type') == 'Value' and feature.get('dtype') in STRING_DTYPES: transformed_df = compute_length_column(parquet_directory, feature_name, transformed_df, dtype='string') elif feature.get('_type') == 'Audio': transformed_df = compute_audio_duration_column(parquet_directory, feature_name, transformed_df) elif feature.get('_type') == 'Image': transformed_df = compute_image_width_height_column(parquet_directory, feature_name, transformed_df) return transformed_df def get_delete_operations(all_repo_files: set[str], split_names: set[str], config: str) -> list[CommitOperationDelete]: same_config_pattern = re.compile(f'^({re.escape(config)})/') existing_split_pattern = re.compile(f"^({'|'.join((re.escape(f'{config}/{split_name}') for split_name in split_names))})/") existing_partial_split_pattern = re.compile(f"^({'|'.join((re.escape(f'{config}/partial-{split_name}') for split_name in split_names))})/") return [CommitOperationDelete(path_in_repo=file) for file in all_repo_files if same_config_pattern.match(file) and file.endswith('.duckdb') and (not existing_split_pattern.match(file)) and (not 
existing_partial_split_pattern.match(file))] def compute_split_duckdb_index_response(job_id: str, dataset: str, config: str, split: str, duckdb_index_file_directory: Path, target_revision: str, source_revision: str, hf_endpoint: str, commit_message: str, url_template: str, hf_token: Optional[str], max_split_size_bytes: int, extensions_directory: Optional[str], committer_hf_token: Optional[str], parquet_metadata_directory: StrPath) -> SplitDuckdbIndex: logging.info(f"compute 'split-duckdb-index' for dataset={dataset!r} config={config!r} split={split!r}") config_parquet_metadata_step = 'config-parquet-metadata' parquet_metadata_response = get_previous_step_or_raise(kind=config_parquet_metadata_step, dataset=dataset, config=config) content_parquet_metadata = parquet_metadata_response['content'] try: split_parquet_files = [parquet_file for parquet_file in content_parquet_metadata['parquet_files_metadata'] if parquet_file['config'] == config and parquet_file['split'] == split] if not split_parquet_files: raise ParquetResponseEmptyError('No parquet files found.') split_directory = extract_split_directory_from_parquet_url(split_parquet_files[0]['url']) partial_parquet_export = parquet_export_is_partial(split_parquet_files[0]['url']) (num_parquet_files_to_index, num_bytes, num_rows) = get_num_parquet_files_to_process(parquet_files=split_parquet_files, parquet_metadata_directory=parquet_metadata_directory, max_size_bytes=max_split_size_bytes) index_filename = DUCKDB_DEFAULT_PARTIAL_INDEX_FILENAME if num_parquet_files_to_index < len(split_parquet_files) else DUCKDB_DEFAULT_INDEX_FILENAME partial = partial_parquet_export or num_parquet_files_to_index < len(split_parquet_files) split_parquet_files = split_parquet_files[:num_parquet_files_to_index] parquet_file_names = [parquet_file['filename'] for parquet_file in split_parquet_files] features = content_parquet_metadata['features'] column_names = ','.join((f'"{column}"' for column in features)) indexable_columns = ','.join((f'"{column}"' for column in get_indexable_columns(Features.from_dict(copy.deepcopy(features))))) except KeyError as e: raise PreviousStepFormatError(f"Previous step '{config_parquet_metadata_step}' did not return the expected content.", e) from e for parquet_file in parquet_file_names: download_file_from_hub(repo_type=REPO_TYPE, revision=source_revision, repo_id=dataset, filename=f'{config}/{split_directory}/{parquet_file}', local_dir=duckdb_index_file_directory, hf_token=hf_token, cache_dir=duckdb_index_file_directory, force_download=True, resume_download=False) split_parquet_directory = duckdb_index_file_directory / config / split_directory all_split_parquets = str(split_parquet_directory / '*.parquet') transformed_df = None try: transformed_df = compute_transformed_data(split_parquet_directory, features) except Exception as err: logging.info(f'Unable to compute transformed data {err}, skipping statistics.') db_path = duckdb_index_file_directory.resolve() / index_filename con = duckdb.connect(str(db_path.resolve())) hf_api = HfApi(endpoint=hf_endpoint, token=hf_token) stemmer = None try: if transformed_df is not None: logging.debug(transformed_df.head()) logging.info(f'Updating data with {transformed_df.columns}') create_command_sql = CREATE_TABLE_JOIN_WITH_TRANSFORMED_DATA_COMMAND.format(columns=column_names, source=all_split_parquets) else: create_command_sql = CREATE_TABLE_COMMAND.format(columns=column_names, source=all_split_parquets) logging.info(create_command_sql) con.sql(create_command_sql) 
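# --- Illustrative sketch (not part of the worker code) ---------------------------------
# The DuckDB sequence around this point is: build a 'data' table from the parquet export,
# append a monotonic row-index column backed by a sequence, then (if any string-like
# columns exist) load the 'fts' extension and create a BM25 index with the chosen stemmer.
# The minimal standalone example below reproduces that flow on an in-memory table; the
# table contents and the 'row_idx'/'content' names are made up, and loading 'fts' may
# download the extension on first use.
import duckdb

con = duckdb.connect()  # in-memory database
con.sql("CREATE TABLE data AS SELECT * FROM (VALUES ('a first document'), ('a second text')) AS t(content)")
con.sql("CREATE OR REPLACE SEQUENCE serial START 0 MINVALUE 0")
con.sql("ALTER TABLE data ADD COLUMN row_idx BIGINT DEFAULT nextval('serial')")
con.execute("INSTALL 'fts'; LOAD 'fts';")
con.sql("PRAGMA create_fts_index('data', 'row_idx', 'content', stemmer='english', overwrite=1)")
# the index is exposed as a macro in the fts_main_<table> schema
print(
    con.sql(
        "SELECT row_idx, fts_main_data.match_bm25(row_idx, 'second') AS score "
        "FROM data ORDER BY score DESC NULLS LAST"
    ).fetchall()
)
con.close()
# ----------------------------------------------------------------------------------------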
con.sql(CREATE_INDEX_ID_COLUMN_COMMANDS) logging.debug(con.sql('SELECT * FROM data LIMIT 5;')) logging.debug(con.sql('SELECT count(*) FROM data;')) if len(indexable_columns) > 0: if extensions_directory is not None: con.execute(SET_EXTENSIONS_DIRECTORY_COMMAND.format(directory=extensions_directory)) con.execute(INSTALL_AND_LOAD_EXTENSION_COMMAND) stemmer = get_monolingual_stemmer(hf_api.dataset_info(repo_id=dataset).card_data) create_index_sql = CREATE_INDEX_COMMAND.format(columns=indexable_columns, stemmer=stemmer) logging.info(create_index_sql) con.sql(create_index_sql) finally: con.close() logging.info(f'about to push index file to {target_revision}') committer_hf_api = HfApi(endpoint=hf_endpoint, token=committer_hf_token) index_file_location = f'{config}/{split_directory}/{index_filename}' try: with lock.git_branch(dataset=dataset, branch=target_revision, owner=job_id, sleeps=LOCK_GIT_BRANCH_RETRY_SLEEPS): logging.debug(f'try to create branch for dataset={dataset!r} with target_revision={target_revision!r} on hf_endpoint={hf_endpoint!r}') create_branch(dataset=dataset, target_revision=target_revision, hf_api=hf_api, committer_hf_api=committer_hf_api) logging.debug(f'get dataset info for dataset={dataset!r} with target_revision={target_revision!r}') target_dataset_info = hf_api.dataset_info(repo_id=dataset, revision=target_revision, files_metadata=False) all_repo_files: set[str] = {f.rfilename for f in target_dataset_info.siblings} delete_operations = get_delete_operations(all_repo_files=all_repo_files, split_names=get_split_names(dataset=dataset, config=config), config=config) logging.debug(f'delete operations for dataset={dataset!r} delete_operations={delete_operations!r}') add_operations: list[CommitOperation] = [CommitOperationAdd(path_in_repo=index_file_location, path_or_fileobj=db_path.resolve())] logging.debug(f'add operations for dataset={dataset!r} add_operations={add_operations!r}') retry_create_commit = retry(on=[HfHubHTTPError], sleeps=HF_HUB_HTTP_ERROR_RETRY_SLEEPS)(committer_hf_api.create_commit) try: retry_create_commit(repo_id=dataset, repo_type=DATASET_TYPE, revision=target_revision, operations=delete_operations + add_operations, commit_message=commit_message, parent_commit=target_dataset_info.sha) except RuntimeError as e: if e.__cause__ and isinstance(e.__cause__, HfHubHTTPError): raise CreateCommitError(message=f'Commit {commit_message} could not be created on the Hub (after {len(HF_HUB_HTTP_ERROR_RETRY_SLEEPS)} attempts).', cause=e.__cause__) from e.__cause__ raise e logging.debug(f'create commit {commit_message} for dataset={dataset!r} add_operations={add_operations!r}') target_dataset_info = hf_api.dataset_info(repo_id=dataset, revision=target_revision, files_metadata=True) logging.debug(f'dataset info for dataset={dataset!r} target_dataset_info={target_dataset_info!r}') except TimeoutError as err: raise LockedDatasetTimeoutError('the dataset is currently locked, please try again later.') from err except RepositoryNotFoundError as err: raise DatasetNotFoundError('The dataset does not exist on the Hub.') from err repo_files = [repo_file for repo_file in target_dataset_info.siblings if repo_file.rfilename == index_file_location] if not repo_files or len(repo_files) != 1: logging.warning(f'Found {len(repo_files)} index files, should be only 1') raise DuckDBIndexFileNotFoundError('No index file was found') repo_file = repo_files[0] if repo_file.size is None: raise ValueError(f'Cannot get size of {repo_file.rfilename}') features[ROW_IDX_COLUMN] = {'dtype': 'int64', 
'_type': 'Value'} return SplitDuckdbIndex(dataset=dataset, config=config, split=split, url=hf_hub_url(repo_id=dataset, filename=repo_file.rfilename, hf_endpoint=hf_endpoint, revision=target_revision, url_template=url_template), filename=Path(repo_file.rfilename).name, size=repo_file.size, features=features, partial=partial, num_rows=num_rows, num_bytes=num_bytes, duckdb_version=duckdb.__version__, stemmer=stemmer) class SplitDuckDbIndexJobRunner(SplitJobRunnerWithCache): duckdb_index_config: DuckDbIndexConfig def __init__(self, job_info: JobInfo, app_config: AppConfig, duckdb_index_cache_directory: StrPath, parquet_metadata_directory: StrPath) -> None: super().__init__(job_info=job_info, app_config=app_config, cache_directory=Path(duckdb_index_cache_directory) / DUCKDB_INDEX_JOB_RUNNER_SUBDIRECTORY) self.duckdb_index_config = app_config.duckdb_index self.parquet_metadata_directory = parquet_metadata_directory @staticmethod def get_job_type() -> str: return 'split-duckdb-index' def compute(self) -> CompleteJobResult: if self.cache_subdirectory is None: raise CacheDirectoryNotInitializedError('Cache directory has not been initialized.') return CompleteJobResult(compute_split_duckdb_index_response(job_id=self.job_info['job_id'], dataset=self.dataset, config=self.config, split=self.split, duckdb_index_file_directory=self.cache_subdirectory, hf_token=self.app_config.common.hf_token, url_template=self.duckdb_index_config.url_template, commit_message=self.duckdb_index_config.commit_message, extensions_directory=self.duckdb_index_config.extensions_directory, committer_hf_token=self.duckdb_index_config.committer_hf_token, hf_endpoint=self.app_config.common.hf_endpoint, target_revision=self.duckdb_index_config.target_revision, source_revision=self.app_config.parquet_and_info.target_revision, max_split_size_bytes=self.duckdb_index_config.max_split_size_bytes, parquet_metadata_directory=self.parquet_metadata_directory)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/split/first_rows.py import logging from pathlib import Path from typing import Optional from datasets import IterableDataset, get_dataset_config_info, load_dataset from fsspec.implementations.http import HTTPFileSystem from libcommon.constants import MAX_NUM_ROWS_PER_PAGE from libcommon.dtos import JobInfo, RowsContent, SplitFirstRowsResponse from libcommon.exceptions import DatasetWithScriptNotSupportedError, FeaturesError, InfoError, ParquetResponseEmptyError, SplitParquetSchemaMismatchError, TooBigContentError from libcommon.parquet_utils import EmptyParquetMetadataError, Indexer, SchemaMismatchError, TooBigRows from libcommon.simple_cache import CachedArtifactError, CachedArtifactNotFoundError from libcommon.storage import StrPath from libcommon.storage_client import StorageClient from libcommon.viewer_utils.rows import create_first_rows_response from worker.config import AppConfig, FirstRowsConfig from worker.dtos import CompleteJobResult from worker.job_runners.split.split_job_runner import SplitJobRunnerWithDatasetsCache from worker.utils import get_rows_or_raise, raise_if_long_column_name def compute_first_rows_from_parquet_response(dataset: str, revision: str, config: str, split: str, storage_client: StorageClient, min_cell_bytes: int, rows_max_bytes: int, rows_max_number: int, rows_min_number: int, columns_max_number: int, indexer: Indexer) -> SplitFirstRowsResponse: logging.info(f"compute 'split-first-rows' from parquet for dataset={dataset!r} config={config!r} split={split!r}") try: rows_index = 
indexer.get_rows_index(dataset=dataset, config=config, split=split) except EmptyParquetMetadataError: raise ParquetResponseEmptyError('No parquet files found.') features = rows_index.parquet_index.features def get_rows_content(rows_max_number: int) -> RowsContent: try: truncated_columns: list[str] = [] if dataset == 'Major-TOM/Core-S2L2A': (pa_table, truncated_columns) = rows_index.query_truncated_binary(offset=0, length=rows_max_number) else: pa_table = rows_index.query(offset=0, length=rows_max_number) return RowsContent(rows=pa_table.to_pylist(), all_fetched=rows_index.parquet_index.num_rows_total <= rows_max_number, truncated_columns=truncated_columns) except TooBigRows as err: raise TooBigContentError(str(err)) except SchemaMismatchError as err: raise SplitParquetSchemaMismatchError('Split parquet files being processed have different schemas. Ensure all files have identical column names.', cause=err) return create_first_rows_response(dataset=dataset, revision=revision, config=config, split=split, storage_client=storage_client, features=features, get_rows_content=get_rows_content, min_cell_bytes=min_cell_bytes, rows_max_bytes=rows_max_bytes, rows_max_number=rows_max_number, rows_min_number=rows_min_number, columns_max_number=columns_max_number) def compute_first_rows_from_streaming_response(dataset: str, revision: str, config: str, split: str, storage_client: StorageClient, hf_token: Optional[str], min_cell_bytes: int, rows_max_bytes: int, rows_max_number: int, rows_min_number: int, columns_max_number: int, max_size_fallback: Optional[int]=None) -> SplitFirstRowsResponse: logging.info(f"compute 'split-first-rows' from streaming for dataset={dataset!r} config={config!r} split={split!r}") try: info = get_dataset_config_info(path=dataset, config_name=config, token=hf_token) except Exception as err: if isinstance(err, ValueError) and 'trust_remote_code' in str(err): raise DatasetWithScriptNotSupportedError from err raise InfoError(f"The info cannot be fetched for the config '{config}' of the dataset.", cause=err) from err if not info.features: try: iterable_dataset = load_dataset(path=dataset, name=config, split=split, streaming=True, token=hf_token) if not isinstance(iterable_dataset, IterableDataset): raise TypeError('load_dataset should return an IterableDataset.') iterable_dataset = iterable_dataset._resolve_features() if not isinstance(iterable_dataset, IterableDataset): raise TypeError('load_dataset should return an IterableDataset.') features = iterable_dataset.features except Exception as err: if isinstance(err, ValueError) and 'trust_remote_code' in str(err): raise DatasetWithScriptNotSupportedError from err raise FeaturesError(f"Cannot extract the features (columns) for the split '{split}' of the config '{config}' of the dataset.", cause=err) from err else: features = info.features raise_if_long_column_name(features) def get_rows_content(rows_max_number: int) -> RowsContent: return get_rows_or_raise(dataset=dataset, config=config, split=split, info=info, max_size_fallback=max_size_fallback, rows_max_number=rows_max_number, token=hf_token) return create_first_rows_response(dataset=dataset, revision=revision, config=config, split=split, storage_client=storage_client, features=features, get_rows_content=get_rows_content, min_cell_bytes=min_cell_bytes, rows_max_bytes=rows_max_bytes, rows_max_number=rows_max_number, rows_min_number=rows_min_number, columns_max_number=columns_max_number) class SplitFirstRowsJobRunner(SplitJobRunnerWithDatasetsCache): first_rows_config: FirstRowsConfig 
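# --- Illustrative sketch (not part of the worker code) ---------------------------------
# SplitFirstRowsJobRunner.compute() below first tries the parquet-backed path
# (compute_first_rows_from_parquet_response) and only falls back to the streaming path
# when the parquet route cannot serve the split. A stripped-down version of that
# fallback pattern, with placeholder helpers standing in for the two compute_* functions:
def _first_rows_with_fallback(dataset: str) -> dict:
    def from_parquet(dataset: str) -> dict:
        raise FileNotFoundError("no parquet export")  # placeholder failure

    def from_streaming(dataset: str) -> dict:
        return {"dataset": dataset, "rows": []}  # placeholder result

    try:
        return from_parquet(dataset)
    except FileNotFoundError:
        # the real runner instead catches ParquetResponseEmptyError,
        # SplitParquetSchemaMismatchError, CachedArtifactNotFoundError and
        # CachedArtifactError before retrying via streaming
        return from_streaming(dataset)
# ----------------------------------------------------------------------------------------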
indexer: Indexer @staticmethod def get_job_type() -> str: return 'split-first-rows' def __init__(self, job_info: JobInfo, app_config: AppConfig, hf_datasets_cache: Path, parquet_metadata_directory: StrPath, storage_client: StorageClient) -> None: super().__init__(job_info=job_info, app_config=app_config, hf_datasets_cache=hf_datasets_cache) self.first_rows_config = app_config.first_rows self.parquet_metadata_directory = parquet_metadata_directory self.indexer = Indexer(hf_token=self.app_config.common.hf_token, parquet_metadata_directory=parquet_metadata_directory, httpfs=HTTPFileSystem(headers={'authorization': f'Bearer {self.app_config.common.hf_token}'}), unsupported_features=[], all_columns_supported_datasets_allow_list='all', max_arrow_data_in_memory=app_config.rows_index.max_arrow_data_in_memory) self.storage_client = storage_client def compute(self) -> CompleteJobResult: try: return CompleteJobResult(compute_first_rows_from_parquet_response(dataset=self.dataset, revision=self.dataset_git_revision, config=self.config, split=self.split, storage_client=self.storage_client, min_cell_bytes=self.first_rows_config.min_cell_bytes, rows_max_bytes=self.first_rows_config.max_bytes, rows_min_number=self.first_rows_config.min_number, rows_max_number=MAX_NUM_ROWS_PER_PAGE, columns_max_number=self.first_rows_config.columns_max_number, indexer=self.indexer)) except (ParquetResponseEmptyError, SplitParquetSchemaMismatchError, CachedArtifactNotFoundError, CachedArtifactError): logging.info(f"Cannot compute 'split-first-rows' from parquet for self.dataset={self.dataset!r} self.config={self.config!r}. Trying to compute it using streaming.") pass return CompleteJobResult(compute_first_rows_from_streaming_response(dataset=self.dataset, revision=self.dataset_git_revision, config=self.config, split=self.split, storage_client=self.storage_client, hf_token=self.app_config.common.hf_token, min_cell_bytes=self.first_rows_config.min_cell_bytes, rows_max_bytes=self.first_rows_config.max_bytes, rows_min_number=self.first_rows_config.min_number, rows_max_number=MAX_NUM_ROWS_PER_PAGE, columns_max_number=self.first_rows_config.columns_max_number)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/split/image_url_columns.py import logging from libcommon.dtos import SplitFirstRowsResponse from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import get_previous_step_or_raise from libcommon.utils import is_image_url from worker.dtos import CompleteJobResult, ImageUrlColumnsResponse from worker.job_runners.split.split_job_runner import SplitJobRunner STRING_FEATURE_DTYPE = 'string' VALUE_FEATURE_TYPE = 'Value' URL_COLUMN_RATION = 0.3 def compute_image_url_columns(dataset: str, config: str, split: str) -> ImageUrlColumnsResponse: logging.info(f"compute 'split-image-url-columns' for dataset={dataset!r} config={config!r} split={split!r}") upstream_response = get_previous_step_or_raise(kind='split-first-rows', dataset=dataset, config=config, split=split) try: first_rows_response = upstream_response upstream_response_content = SplitFirstRowsResponse(dataset=dataset, config=config, split=split, features=first_rows_response['content']['features'], rows=first_rows_response['content']['rows'], truncated=first_rows_response['content']['truncated'] if 'truncated' in first_rows_response['content'] else None) features = upstream_response_content['features'] first_rows = upstream_response_content['rows'] except KeyError as e: raise PreviousStepFormatError('Previous step did not return 
the expected content.', e) from e string_columns = [feature['name'] for feature in features if 'dtype' in feature['type'] and '_type' in feature['type'] and (feature['type']['dtype'] == STRING_FEATURE_DTYPE) and (feature['type']['_type'] == VALUE_FEATURE_TYPE)] first_rows_size = len(first_rows) if first_rows_size == 0: return ImageUrlColumnsResponse(columns=[]) urls_columns = [] for string_column in string_columns: urls_count = sum((1 for row in first_rows if isinstance(row['row'].get(string_column), str) and is_image_url(text=row['row'][string_column]))) if urls_count and urls_count / first_rows_size > URL_COLUMN_RATION: urls_columns.append(string_column) return ImageUrlColumnsResponse(columns=urls_columns) class SplitImageUrlColumnsJobRunner(SplitJobRunner): @staticmethod def get_job_type() -> str: return 'split-image-url-columns' def compute(self) -> CompleteJobResult: return CompleteJobResult(compute_image_url_columns(dataset=self.dataset, config=self.config, split=self.split)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/split/is_valid.py import logging from libcommon.constants import CONFIG_HAS_VIEWER_KIND, SPLIT_HAS_PREVIEW_KIND, SPLIT_HAS_SEARCH_KIND, SPLIT_HAS_STATISTICS_KIND from libcommon.dtos import JobInfo from libcommon.simple_cache import get_previous_step_or_raise, is_successful_response from worker.config import AppConfig from worker.dtos import CompleteJobResult, IsValidResponse, JobResult from worker.job_runners.split.split_job_runner import SplitJobRunner def compute_is_valid_response(dataset: str, config: str, split: str) -> IsValidResponse: logging.info(f"compute 'split-is-valid' response for dataset={dataset!r}") viewer = is_successful_response(dataset=dataset, config=config, split=None, kind=CONFIG_HAS_VIEWER_KIND) preview = is_successful_response(dataset=dataset, config=config, split=split, kind=SPLIT_HAS_PREVIEW_KIND) try: duckdb_response = get_previous_step_or_raise(kind=SPLIT_HAS_SEARCH_KIND, dataset=dataset, config=config, split=split) search_content = duckdb_response['content'] filter = True search = search_content['stemmer'] is not None except Exception: filter = False search = False statistics = is_successful_response(dataset=dataset, config=config, split=split, kind=SPLIT_HAS_STATISTICS_KIND) return IsValidResponse(viewer=viewer, preview=preview, search=search, filter=filter, statistics=statistics) class SplitIsValidJobRunner(SplitJobRunner): @staticmethod def get_job_type() -> str: return 'split-is-valid' def __init__(self, job_info: JobInfo, app_config: AppConfig) -> None: super().__init__(job_info=job_info, app_config=app_config) def compute(self) -> JobResult: return CompleteJobResult(compute_is_valid_response(dataset=self.dataset, config=self.config, split=self.split)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/split/opt_in_out_urls_count.py import logging from libcommon.exceptions import PreviousStepFormatError from libcommon.simple_cache import get_previous_step_or_raise from worker.dtos import CompleteJobResult, OptInOutUrlsCountResponse from worker.job_runners.split.split_job_runner import SplitJobRunner def compute_opt_in_out_urls_count_response(dataset: str, config: str, split: str) -> OptInOutUrlsCountResponse: logging.info(f"compute 'split-opt-in-out-urls-count' for dataset={dataset!r} config={config!r} split={split!r}") opt_in_out_urls_scan_response = get_previous_step_or_raise(kind='split-opt-in-out-urls-scan', dataset=dataset, config=config, split=split) try: content = 
opt_in_out_urls_scan_response['content'] opt_in_out_urls_count = OptInOutUrlsCountResponse(has_urls_columns=content['has_urls_columns'], num_opt_in_urls=content['num_opt_in_urls'], num_opt_out_urls=content['num_opt_out_urls'], num_scanned_rows=content['num_scanned_rows'], num_urls=content['num_urls'], urls_columns=content['urls_columns'], full_scan=content['full_scan']) except KeyError as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e return opt_in_out_urls_count class SplitOptInOutUrlsCountJobRunner(SplitJobRunner): @staticmethod def get_job_type() -> str: return 'split-opt-in-out-urls-count' def compute(self) -> CompleteJobResult: return CompleteJobResult(compute_opt_in_out_urls_count_response(dataset=self.dataset, config=self.config, split=self.split)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/split/opt_in_out_urls_scan_from_streaming.py import logging from asyncio import Semaphore, create_task, run, wait from pathlib import Path from typing import Any, Optional from aiohttp import ClientSession from aiolimiter import AsyncLimiter from datasets import get_dataset_config_info from libcommon.dtos import JobInfo from libcommon.exceptions import DatasetWithScriptNotSupportedError, ExternalServerError, InfoError, MissingSpawningTokenError, PreviousStepFormatError, TooManyColumnsError from libcommon.simple_cache import get_previous_step_or_raise from worker.config import AppConfig, OptInOutUrlsScanConfig from worker.dtos import CompleteJobResult, OptInOutUrlsScanResponse, OptUrl from worker.job_runners.split.split_job_runner import SplitJobRunnerWithDatasetsCache from worker.utils import get_rows_or_raise async def check_spawning(image_urls: list[str], session: ClientSession, semaphore: Semaphore, limiter: AsyncLimiter, spawning_url: str) -> Any: if not image_urls: return {'urls': []} elif len(image_urls) == 1: image_urls = image_urls + [''] async with semaphore: async with limiter: async with session.post(url=spawning_url, data='\n'.join(image_urls)) as resp: spawning_response = await resp.json() return spawning_response async def opt_in_out_task(image_urls: list[str], session: ClientSession, semaphore: Semaphore, limiter: AsyncLimiter, spawning_url: str) -> tuple[list[Any], list[Any]]: try: spawning_response = await check_spawning(image_urls, session, semaphore, limiter, spawning_url) except Exception as err: raise ExternalServerError(message=f'Error when trying to connect to {spawning_url}', cause=err) from err if 'urls' not in spawning_response: raise ExternalServerError(message=f"Error when trying to connect to {spawning_url}: '{spawning_response}'") opt_in_urls_indices = [i for i in range(len(image_urls)) if spawning_response['urls'][i]['optIn']] opt_out_urls_indices = [i for i in range(len(image_urls)) if spawning_response['urls'][i]['optOut']] return (opt_in_urls_indices, opt_out_urls_indices) async def opt_in_out_scan_urls(urls: list[str], urls_number_per_batch: int, spawning_token: str, max_concurrent_requests_number: int, max_requests_per_second: int, spawning_url: str) -> tuple[list[int], list[int]]: offsets = [] tasks = [] semaphore = Semaphore(value=max_concurrent_requests_number) limiter = AsyncLimiter(max_requests_per_second, time_period=1) headers = {'Authorization': f'API {spawning_token}'} async with ClientSession(headers=headers) as session: for offset in range(0, len(urls), urls_number_per_batch): offsets.append(offset) limit = offset + urls_number_per_batch 
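# --- Illustrative sketch (not part of the worker code) ---------------------------------
# The surrounding coroutine chops `urls` into fixed-size batches, creates one task per
# batch, bounds concurrency with an asyncio.Semaphore plus an aiolimiter.AsyncLimiter,
# and then maps each batch's local indices back to global positions via the batch offset.
# The standalone toy below keeps only the batching/offset arithmetic and the semaphore;
# the "scan" is simulated with asyncio.sleep, and all names are made up.
import asyncio

async def _toy_scan_batch(batch: list[str], semaphore: asyncio.Semaphore) -> list[int]:
    async with semaphore:  # at most `max_concurrency` batches in flight at once
        await asyncio.sleep(0.01)  # stands in for the HTTP POST to the scanning API
        return [i for i, url in enumerate(batch) if url.endswith(".png")]  # local indices

async def _toy_scan(urls: list[str], batch_size: int = 3, max_concurrency: int = 2) -> list[int]:
    semaphore = asyncio.Semaphore(max_concurrency)
    offsets = list(range(0, len(urls), batch_size))
    tasks = [asyncio.create_task(_toy_scan_batch(urls[o:o + batch_size], semaphore)) for o in offsets]
    results = await asyncio.gather(*tasks)
    # translate per-batch indices back to positions in the original `urls` list
    return [offset + local_idx for offset, batch_hits in zip(offsets, results) for local_idx in batch_hits]

# asyncio.run(_toy_scan(["a.png", "b.jpg", "c.png", "d.png", "e.txt"]))  # -> [0, 2, 3]
# ----------------------------------------------------------------------------------------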
tasks.append(create_task(opt_in_out_task(urls[offset:limit], session, semaphore, limiter, spawning_url))) await wait(tasks) opt_in_urls_indices = [] opt_out_urls_indices = [] for (offset, task) in zip(offsets, tasks): (batch_opt_in_urls_indices, batch_opt_out_urls_indices) = task.result() for batch_opt_in_urls_idx in batch_opt_in_urls_indices: opt_in_urls_indices.append(offset + batch_opt_in_urls_idx) for batch_opt_out_urls_idx in batch_opt_out_urls_indices: opt_out_urls_indices.append(offset + batch_opt_out_urls_idx) return (opt_in_urls_indices, opt_out_urls_indices) def compute_opt_in_out_urls_scan_response(dataset: str, config: str, split: str, hf_token: Optional[str], rows_max_number: int, columns_max_number: int, urls_number_per_batch: int, spawning_token: Optional[str], max_concurrent_requests_number: int, max_requests_per_second: int, spawning_url: str) -> OptInOutUrlsScanResponse: logging.info(f"compute 'split-opt-in-out-urls-scan' for dataset={dataset!r} config={config!r} split={split!r}") if not spawning_token: raise MissingSpawningTokenError('OPT_IN_OUT_URLS_SCAN_SPAWNING_TOKEN is not set') upstream_response = get_previous_step_or_raise(kind='split-image-url-columns', dataset=dataset, config=config, split=split) try: image_url_columns_response = upstream_response image_url_columns = image_url_columns_response['content']['columns'] except KeyError as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e try: info = get_dataset_config_info(path=dataset, config_name=config, token=hf_token) except Exception as err: if isinstance(err, ValueError) and 'trust_remote_code' in str(err): raise DatasetWithScriptNotSupportedError from err raise InfoError(f"The info cannot be fetched for the config '{config}' of the dataset.", cause=err) from err if not image_url_columns: return OptInOutUrlsScanResponse(urls_columns=[], opt_in_urls=[], opt_out_urls=[], num_opt_in_urls=0, num_opt_out_urls=0, num_urls=0, num_scanned_rows=0, has_urls_columns=False, full_scan=None) if len(image_url_columns) > columns_max_number: raise TooManyColumnsError(f'The number of columns ({len(image_url_columns)}) exceeds the maximum supported number of columns to scan ({columns_max_number}).') rows_content = get_rows_or_raise(dataset=dataset, config=config, split=split, info=info, rows_max_number=rows_max_number, token=hf_token, column_names=image_url_columns) rows = rows_content.rows num_scanned_rows = len(rows) urls = [row[urls_column] if row[urls_column] else '' for row in rows for urls_column in image_url_columns] (opt_in_urls_indices, opt_out_urls_indices) = run(opt_in_out_scan_urls(urls, urls_number_per_batch=urls_number_per_batch, spawning_token=spawning_token, max_concurrent_requests_number=max_concurrent_requests_number, max_requests_per_second=max_requests_per_second, spawning_url=spawning_url)) opt_in_urls = [OptUrl(url=urls[url_idx], row_idx=url_idx // len(image_url_columns), column_name=image_url_columns[url_idx % len(image_url_columns)]) for url_idx in opt_in_urls_indices] opt_out_urls = [OptUrl(url=urls[url_idx], row_idx=url_idx // len(image_url_columns), column_name=image_url_columns[url_idx % len(image_url_columns)]) for url_idx in opt_out_urls_indices] return OptInOutUrlsScanResponse(urls_columns=image_url_columns, opt_in_urls=opt_in_urls, opt_out_urls=opt_out_urls, num_opt_in_urls=len(opt_in_urls), num_opt_out_urls=len(opt_out_urls), num_urls=len(urls), num_scanned_rows=num_scanned_rows, has_urls_columns=True, full_scan=rows_content.all_fetched) class 
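# Aside (not part of the original file): the flat `urls` list above is built row-major (all image URL columns of row 0, then row 1, ...), which is why an opt-in/opt-out index can be mapped back to its cell with a single divmod, matching the OptUrl construction. A small standalone sketch with made-up data:
image_url_columns = ["image_url", "thumbnail_url"]  # illustrative column names
rows = [
    {"image_url": "https://example.org/a.png", "thumbnail_url": "https://example.org/a_small.png"},
    {"image_url": "https://example.org/b.png", "thumbnail_url": "https://example.org/b_small.png"},
]
urls = [row[column] for row in rows for column in image_url_columns]  # row-major flattening
url_idx = 3  # points at row 1, column "thumbnail_url"
row_idx, column_idx = divmod(url_idx, len(image_url_columns))
assert urls[url_idx] == rows[row_idx][image_url_columns[column_idx]]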
SplitOptInOutUrlsScanJobRunner(SplitJobRunnerWithDatasetsCache): urls_scan_config: OptInOutUrlsScanConfig @staticmethod def get_job_type() -> str: return 'split-opt-in-out-urls-scan' def __init__(self, job_info: JobInfo, app_config: AppConfig, hf_datasets_cache: Path) -> None: super().__init__(job_info=job_info, app_config=app_config, hf_datasets_cache=hf_datasets_cache) self.urls_scan_config = app_config.urls_scan def compute(self) -> CompleteJobResult: return CompleteJobResult(compute_opt_in_out_urls_scan_response(dataset=self.dataset, config=self.config, split=self.split, hf_token=self.app_config.common.hf_token, rows_max_number=self.urls_scan_config.rows_max_number, columns_max_number=self.urls_scan_config.columns_max_number, urls_number_per_batch=self.urls_scan_config.urls_number_per_batch, spawning_token=self.urls_scan_config.spawning_token, max_concurrent_requests_number=self.urls_scan_config.max_concurrent_requests_number, max_requests_per_second=self.urls_scan_config.max_requests_per_second, spawning_url=self.urls_scan_config.spawning_url)) # File: dataset-viewer-main/services/worker/src/worker/job_runners/split/presidio_scan.py import logging import re from collections import Counter from collections.abc import Iterable from itertools import count from pathlib import Path from typing import Any, Optional from datasets import DatasetInfo, Features, Value from datasets.features.features import FeatureType, _visit from libcommon.dtos import JobInfo, Row from libcommon.exceptions import PresidioScanNotEnabledForThisDataset, PreviousStepFormatError, TooManyColumnsError from libcommon.simple_cache import get_previous_step_or_raise from presidio_analyzer import AnalyzerEngine, BatchAnalyzerEngine, RecognizerResult from worker.config import AppConfig, PresidioEntitiesScanConfig from worker.dtos import CompleteJobResult, ConfigParquetAndInfoResponse, PresidioEntitiesScanResponse, PresidioEntity from worker.job_runners.split.split_job_runner import SplitJobRunnerWithDatasetsCache from worker.utils import batched, get_rows_or_raise BATCH_SIZE = 10 batch_analyzer: Optional[BatchAnalyzerEngine] = None def mask(text: str) -> str: return ' '.join((word[:min(2, len(word) - 1)] + re.sub('[A-Za-z0-9]', '*', word[min(2, len(word) - 1):]) for word in text.split(' '))) def get_strings(row_content: Any) -> str: if isinstance(row_content, str): return row_content if isinstance(row_content, dict): row_content = list(row_content.values()) if isinstance(row_content, list): str_items = (get_strings(row_content_item) for row_content_item in row_content) return '\n'.join((str_item for str_item in str_items if str_item)) return '' def _simple_analyze_iterator_cache(batch_analyzer: BatchAnalyzerEngine, texts: Iterable[str], language: str, score_threshold: float, cache: dict[str, list[RecognizerResult]]) -> list[list[RecognizerResult]]: not_cached_results = iter(batch_analyzer.analyze_iterator((text for text in texts if text not in cache), language=language, score_threshold=score_threshold)) results = [cache[text] if text in cache else next(not_cached_results) for text in texts] cache.clear() cache.update(dict(zip(texts, results))) return results def analyze(batch_analyzer: BatchAnalyzerEngine, batch: list[dict[str, str]], indices: Iterable[int], scanned_columns: list[str], columns_descriptions: list[str], cache: Optional[dict[str, list[RecognizerResult]]]=None) -> list[PresidioEntity]: cache = {} if cache is None else cache texts = [f"The following is {columns_description} data:\n\n{example[column_name] or 
''}" for example in batch for (column_name, columns_description) in zip(scanned_columns, columns_descriptions)] return [PresidioEntity(text=texts[i * len(scanned_columns) + j][recognizer_result.start:recognizer_result.end], type=recognizer_result.entity_type, row_idx=row_idx, column_name=column_name) for (i, row_idx, recognizer_row_results) in zip(count(), indices, batched(_simple_analyze_iterator_cache(batch_analyzer, texts, language='en', score_threshold=0.8, cache=cache), len(scanned_columns))) for (j, column_name, columns_description, recognizer_results) in zip(count(), scanned_columns, columns_descriptions, recognizer_row_results) for recognizer_result in recognizer_results if recognizer_result.start >= len(f'The following is {columns_description} data:\n\n')] def presidio_scan_entities(rows: list[Row], scanned_columns: list[str], columns_descriptions: list[str], max_text_length: int, disable_masks: bool=False) -> list[PresidioEntity]: global batch_analyzer cache: dict[str, list[RecognizerResult]] = {} if batch_analyzer is None: batch_analyzer = BatchAnalyzerEngine(AnalyzerEngine()) presidio_entities: list[PresidioEntity] = [] rows_with_scanned_columns_only = ({column_name: get_strings(row[column_name])[:max_text_length] for column_name in scanned_columns} for row in rows) for (indices, batch) in batched(rows_with_scanned_columns_only, BATCH_SIZE, with_indices=True): for presidio_entity in analyze(batch_analyzer=batch_analyzer, batch=batch, indices=indices, scanned_columns=scanned_columns, columns_descriptions=columns_descriptions, cache=cache): presidio_entities.append(PresidioEntity(text=presidio_entity['text'] if disable_masks else mask(presidio_entity['text']), type=presidio_entity['type'], row_idx=presidio_entity['row_idx'], column_name=presidio_entity['column_name'])) return presidio_entities def get_columns_with_strings(features: Features) -> list[str]: columns_with_strings: list[str] = [] for (column, feature) in features.items(): str_column = str(column) with_string = False def classify(feature: FeatureType) -> None: nonlocal with_string if isinstance(feature, Value) and feature.dtype == 'string': with_string = True _visit(feature, classify) if with_string: columns_with_strings.append(str_column) return columns_with_strings def get_column_description(column_name: str, feature: FeatureType) -> str: nested_fields: list[str] = [] def get_nested_field_names(feature: FeatureType) -> None: nonlocal nested_fields if isinstance(feature, dict): nested_fields += list(feature) _visit(feature, get_nested_field_names) return f"{column_name} (with {', '.join(nested_fields)})" if nested_fields else column_name def compute_presidio_entities_scan_response(dataset: str, config: str, split: str, hf_token: Optional[str], rows_max_number: int, columns_max_number: int, max_text_length: int) -> PresidioEntitiesScanResponse: if not ('email' in dataset or 'pii' in dataset or 'presidio' in dataset or ('ssn' in dataset) or ('DVUser/' in dataset) or (dataset in enabled_datasets)): raise PresidioScanNotEnabledForThisDataset(dataset) logging.info(f"compute 'split-presidio-scan' for dataset={dataset!r} config={config!r} split={split!r}") parquet_and_info_response = get_previous_step_or_raise(kind='config-parquet-and-info', dataset=dataset, config=config) try: upstream_response_content = ConfigParquetAndInfoResponse(parquet_files=parquet_and_info_response['content']['parquet_files'], dataset_info=parquet_and_info_response['content']['dataset_info'],
partial=parquet_and_info_response['content']['partial'], estimated_dataset_info=parquet_and_info_response['content'].get('estimated_dataset_info')) info = DatasetInfo.from_dict(upstream_response_content['dataset_info']) if info.features is None: raise PreviousStepFormatError('Previous step did not return the expected content (missing features).') features = info.features except KeyError as e: raise PreviousStepFormatError('Previous step did not return the expected content.', e) from e scanned_columns = get_columns_with_strings(features) columns_descriptions = [get_column_description(column_name, features[column_name]) for column_name in scanned_columns] if not scanned_columns: return PresidioEntitiesScanResponse(scanned_columns=scanned_columns, num_in_vehicle_registration_entities=0, num_organization_entities=0, num_sg_nric_fin_entities=0, num_person_entities=0, num_credit_card_entities=0, num_medical_license_entities=0, num_nrp_entities=0, num_us_ssn_entities=0, num_crypto_entities=0, num_date_time_entities=0, num_location_entities=0, num_us_driver_license_entities=0, num_phone_number_entities=0, num_url_entities=0, num_us_passport_entities=0, num_age_entities=0, num_au_acn_entities=0, num_email_address_entities=0, num_in_pan_entities=0, num_ip_address_entities=0, num_id_entities=0, num_us_bank_number_entities=0, num_in_aadhaar_entities=0, num_us_itin_entities=0, num_au_medicare_entities=0, num_iban_code_entities=0, num_au_tfn_entities=0, num_uk_nhs_entities=0, num_email_entities=0, num_au_abn_entities=0, num_rows_with_in_vehicle_registration_entities=0, num_rows_with_organization_entities=0, num_rows_with_sg_nric_fin_entities=0, num_rows_with_person_entities=0, num_rows_with_credit_card_entities=0, num_rows_with_medical_license_entities=0, num_rows_with_nrp_entities=0, num_rows_with_us_ssn_entities=0, num_rows_with_crypto_entities=0, num_rows_with_date_time_entities=0, num_rows_with_location_entities=0, num_rows_with_us_driver_license_entities=0, num_rows_with_phone_number_entities=0, num_rows_with_url_entities=0, num_rows_with_us_passport_entities=0, num_rows_with_age_entities=0, num_rows_with_au_acn_entities=0, num_rows_with_email_address_entities=0, num_rows_with_in_pan_entities=0, num_rows_with_ip_address_entities=0, num_rows_with_id_entities=0, num_rows_with_us_bank_number_entities=0, num_rows_with_in_aadhaar_entities=0, num_rows_with_us_itin_entities=0, num_rows_with_au_medicare_entities=0, num_rows_with_iban_code_entities=0, num_rows_with_au_tfn_entities=0, num_rows_with_uk_nhs_entities=0, num_rows_with_email_entities=0, num_rows_with_au_abn_entities=0, num_scanned_rows=0, has_scanned_columns=False, full_scan=None, entities=[]) if len(scanned_columns) > columns_max_number: raise TooManyColumnsError(f'The number of columns ({len(scanned_columns)}) exceeds the maximum supported number of columns to scan ({columns_max_number}).') rows_content = get_rows_or_raise(dataset=dataset, config=config, split=split, info=info, rows_max_number=rows_max_number, token=hf_token, column_names=scanned_columns) rows = rows_content.rows num_scanned_rows = len(rows) presidio_entities = presidio_scan_entities(rows, scanned_columns=scanned_columns, columns_descriptions=columns_descriptions, max_text_length=max_text_length) entity_type_counter = Counter((presidio_entity['type'] for presidio_entity in presidio_entities)) entity_type_and_row_idx_pairs = set(((presidio_entity['type'], presidio_entity['row_idx']) for presidio_entity in presidio_entities)) rows_per_entity_type_counter = Counter((entity_type 
for (entity_type, _) in entity_type_and_row_idx_pairs)) return PresidioEntitiesScanResponse(scanned_columns=scanned_columns, num_in_vehicle_registration_entities=entity_type_counter.get('IN_VEHICLE_REGISTRATION', 0), num_organization_entities=entity_type_counter.get('ORGANIZATION', 0), num_sg_nric_fin_entities=entity_type_counter.get('SG_NRIC_FIN', 0), num_person_entities=entity_type_counter.get('PERSON', 0), num_credit_card_entities=entity_type_counter.get('CREDIT_CARD', 0), num_medical_license_entities=entity_type_counter.get('MEDICAL_LICENSE', 0), num_nrp_entities=entity_type_counter.get('NRP', 0), num_us_ssn_entities=entity_type_counter.get('US_SSN', 0), num_crypto_entities=entity_type_counter.get('CRYPTO', 0), num_date_time_entities=entity_type_counter.get('DATE_TIME', 0), num_location_entities=entity_type_counter.get('LOCATION', 0), num_us_driver_license_entities=entity_type_counter.get('US_DRIVER_LICENSE', 0), num_phone_number_entities=entity_type_counter.get('PHONE_NUMBER', 0), num_url_entities=entity_type_counter.get('URL', 0), num_us_passport_entities=entity_type_counter.get('US_PASSPORT', 0), num_age_entities=entity_type_counter.get('AGE', 0), num_au_acn_entities=entity_type_counter.get('AU_ACN', 0), num_email_address_entities=entity_type_counter.get('EMAIL_ADDRESS', 0), num_in_pan_entities=entity_type_counter.get('IN_PAN', 0), num_ip_address_entities=entity_type_counter.get('IP_ADDRESS', 0), num_id_entities=entity_type_counter.get('ID', 0), num_us_bank_number_entities=entity_type_counter.get('US_BANK_NUMBER', 0), num_in_aadhaar_entities=entity_type_counter.get('IN_AADHAAR', 0), num_us_itin_entities=entity_type_counter.get('US_ITIN', 0), num_au_medicare_entities=entity_type_counter.get('AU_MEDICARE', 0), num_iban_code_entities=entity_type_counter.get('IBAN_CODE', 0), num_au_tfn_entities=entity_type_counter.get('AU_TFN', 0), num_uk_nhs_entities=entity_type_counter.get('UK_NHS', 0), num_email_entities=entity_type_counter.get('EMAIL', 0), num_au_abn_entities=entity_type_counter.get('AU_ABN', 0), num_rows_with_in_vehicle_registration_entities=rows_per_entity_type_counter.get('IN_VEHICLE_REGISTRATION', 0), num_rows_with_organization_entities=rows_per_entity_type_counter.get('ORGANIZATION', 0), num_rows_with_sg_nric_fin_entities=rows_per_entity_type_counter.get('SG_NRIC_FIN', 0), num_rows_with_person_entities=rows_per_entity_type_counter.get('PERSON', 0), num_rows_with_credit_card_entities=rows_per_entity_type_counter.get('CREDIT_CARD', 0), num_rows_with_medical_license_entities=rows_per_entity_type_counter.get('MEDICAL_LICENSE', 0), num_rows_with_nrp_entities=rows_per_entity_type_counter.get('NRP', 0), num_rows_with_us_ssn_entities=rows_per_entity_type_counter.get('US_SSN', 0), num_rows_with_crypto_entities=rows_per_entity_type_counter.get('CRYPTO', 0), num_rows_with_date_time_entities=rows_per_entity_type_counter.get('DATE_TIME', 0), num_rows_with_location_entities=rows_per_entity_type_counter.get('LOCATION', 0), num_rows_with_us_driver_license_entities=rows_per_entity_type_counter.get('US_DRIVER_LICENSE', 0), num_rows_with_phone_number_entities=rows_per_entity_type_counter.get('PHONE_NUMBER', 0), num_rows_with_url_entities=rows_per_entity_type_counter.get('URL', 0), num_rows_with_us_passport_entities=rows_per_entity_type_counter.get('US_PASSPORT', 0), num_rows_with_age_entities=rows_per_entity_type_counter.get('AGE', 0), num_rows_with_au_acn_entities=rows_per_entity_type_counter.get('AU_ACN', 0), num_rows_with_email_address_entities=rows_per_entity_type_counter.get('EMAIL_ADDRESS', 
0), num_rows_with_in_pan_entities=rows_per_entity_type_counter.get('IN_PAN', 0), num_rows_with_ip_address_entities=rows_per_entity_type_counter.get('IP_ADDRESS', 0), num_rows_with_id_entities=rows_per_entity_type_counter.get('ID', 0), num_rows_with_us_bank_number_entities=rows_per_entity_type_counter.get('US_BANK_NUMBER', 0), num_rows_with_in_aadhaar_entities=rows_per_entity_type_counter.get('IN_AADHAAR', 0), num_rows_with_us_itin_entities=rows_per_entity_type_counter.get('US_ITIN', 0), num_rows_with_au_medicare_entities=rows_per_entity_type_counter.get('AU_MEDICARE', 0), num_rows_with_iban_code_entities=rows_per_entity_type_counter.get('IBAN_CODE', 0), num_rows_with_au_tfn_entities=rows_per_entity_type_counter.get('AU_TFN', 0), num_rows_with_uk_nhs_entities=rows_per_entity_type_counter.get('UK_NHS', 0), num_rows_with_email_entities=rows_per_entity_type_counter.get('EMAIL', 0), num_rows_with_au_abn_entities=rows_per_entity_type_counter.get('AU_ABN', 0), num_scanned_rows=num_scanned_rows, has_scanned_columns=True, full_scan=rows_content.all_fetched, entities=presidio_entities) class SplitPresidioEntitiesScanJobRunner(SplitJobRunnerWithDatasetsCache): presidio_entities_scan_config: PresidioEntitiesScanConfig @staticmethod def get_job_type() -> str: return 'split-presidio-scan' def __init__(self, job_info: JobInfo, app_config: AppConfig, hf_datasets_cache: Path) -> None: super().__init__(job_info=job_info, app_config=app_config, hf_datasets_cache=hf_datasets_cache) self.presidio_entities_scan_config = app_config.presidio_scan def compute(self) -> CompleteJobResult: return CompleteJobResult(compute_presidio_entities_scan_response(dataset=self.dataset, config=self.config, split=self.split, hf_token=self.app_config.common.hf_token, rows_max_number=self.presidio_entities_scan_config.rows_max_number, columns_max_number=self.presidio_entities_scan_config.columns_max_number, max_text_length=self.presidio_entities_scan_config.max_text_length)) top_2k_most_liked_datasets = {'fka/awesome-chatgpt-prompts', 'Open-Orca/OpenOrca', 'OpenAssistant/oasst1', 'HuggingFaceFW/fineweb', 'gsdf/EasyNegative', 'Anthropic/hh-rlhf', 'togethercomputer/RedPajama-Data-1T', 'Nerfgun3/bad_prompt', 'tiiuae/falcon-refinedweb', 'allenai/dolma', 'anon8231489123/ShareGPT_Vicuna_unfiltered', 'bigcode/the-stack', 'QingyiSi/Alpaca-CoT', 'databricks/databricks-dolly-15k', 'tatsu-lab/alpaca', 'teknium/OpenHermes-2.5', 'JosephusCheung/GuanacoDataset', 'legacy-datasets/wikipedia', 'HuggingFaceTB/cosmopedia', 'm-a-p/COIG-CQIA', 'lmsys/lmsys-chat-1m', 'poloclub/diffusiondb', 'liwu/MNBVC', 'Gustavosta/Stable-Diffusion-Prompts', 'BAAI/COIG', 'uonlp/CulturaX', 'yahma/alpaca-cleaned', 'roneneldan/TinyStories', 'stingning/ultrachat', 'wikimedia/wikipedia', 'GAIR/lima', 'HuggingFaceH4/no_robots', 'cognitivecomputations/dolphin', 'cerebras/SlimPajama-627B', 'timdettmers/openassistant-guanaco', 'HuggingFaceH4/ultrachat_200k', 'EleutherAI/pile', 'liuhaotian/LLaVA-Instruct-150K', 'b-mc2/sql-create-context', 'garage-bAInd/Open-Platypus', 'bigcode/starcoderdata', 'microsoft/orca-math-word-problems-200k', 'ILSVRC/imagenet-1k', 'nyu-mll/glue', 'bigcode/the-stack-dedup', 'togethercomputer/RedPajama-Data-V2', 'gretelai/synthetic_text_to_sql', 'allenai/objaverse', 'Skylion007/openwebtext', 'Salesforce/wikitext', 'HuggingFaceM4/WebSight', 'RyokoAI/ShareGPT52K', 'laion/OIG', 'stanfordnlp/SHP', 'PleIAs/YouTube-Commons', 'Skywork/SkyPile-150B', 'glaiveai/glaive-function-calling-v2', 'Samsung/samsum', 'lmsys/chatbot_arena_conversations', 
'openbmb/UltraFeedback', 'lambdalabs/pokemon-blip-captions', 'shibing624/medical', 'berkeley-nest/Nectar', 'Intel/orca_dpo_pairs', 'YeungNLP/firefly-train-1.1M', 'BAAI/COIG-PC', 'meta-math/MetaMathQA', 'openai/gsm8k', 'codeparrot/github-code', 'bookcorpus/bookcorpus', 'Open-Orca/SlimOrca', 'dair-ai/emotion', 'CohereForAI/aya_dataset', 'legacy-datasets/c4', 'cais/mmlu', 'open-web-math/open-web-math', 'code-search-net/code_search_net', 'allenai/WildChat-1M', 'rajpurkar/squad', 'litagin/moe-speech', 'Lin-Chen/ShareGPT4V', 'shareAI/ShareGPT-Chinese-English-90k', 'nomic-ai/gpt4all-j-prompt-generations', 'ceval/ceval-exam', 'google/fleurs', 'openai/webgpt_comparisons', 'bigcode/the-stack-v2', 'HuggingFaceM4/the_cauldron', 'Salesforce/dialogstudio', 'LDJnr/Capybara', 'stanfordnlp/imdb', 'nampdn-ai/tiny-codes', 'CausalLM/Refined-Anime-Text', 'bigscience/P3', 'vicgalle/alpaca-gpt4', 'bigcode/ta-prompt', 'Locutusque/UltraTextbooks', 'allenai/c4', 'pile-of-law/pile-of-law', 'teknium/openhermes', 'TIGER-Lab/MathInstruct', 'HuggingFaceH4/ultrafeedback_binarized', 'PygmalionAI/PIPPA', 'openai/openai_humaneval', 'abisee/cnn_dailymail', 'yizhongw/self_instruct', 'SirNeural/flan_v2', 'nvidia/HelpSteer', 'THUDM/AgentInstruct', 'nvidia/OpenMathInstruct-1', 'openai/summarize_from_feedback', 'nickrosh/Evol-Instruct-Code-80k-v1', 'storytracer/US-PD-Books', 'OpenAssistant/oasst2', 'Cohere/wikipedia-2023-11-embed-multilingual-v3', 'argilla/OpenHermesPreferences', 'Hello-SimpleAI/HC3', 'SciPhi/textbooks-are-all-you-need-lite', 'vikp/textbook_quality_programming', 'takala/financial_phrasebank', 'truthfulqa/truthful_qa', 'GAIR/MathPile', 'Anthropic/persuasion', 'm-a-p/Code-Feedback', 'laion/laion2B-en', 'wangrui6/Zhihu-KOL', 'openchat/openchat_sharegpt4_dataset', 'oscar-corpus/oscar', 'sahil2801/CodeAlpaca-20k', 'Tele-AI/TeleChat-PTD', 'mozilla-foundation/common_voice_11_0', 'mlabonne/orpo-dpo-mix-40k', 'Open-Orca/FLAN', 'rajpurkar/squad_v2', 'nyanko7/LLaMA-65B', 'aps/super_glue', 'cognitivecomputations/wizard_vicuna_70k_unfiltered', 'Amod/mental_health_counseling_conversations', 'EleutherAI/proof-pile-2', 'ProGamerGov/StableDiffusion-v1-5-Regularization-Images', 'defunct-datasets/the_pile_books3', 'legacy-datasets/mc4', 'knkarthick/dialogsum', 'argilla/distilabel-capybara-dpo-7k-binarized', 'nyanko7/danbooru2023', 'Hello-SimpleAI/HC3-Chinese', 'MMMU/MMMU', 'ise-uiuc/Magicoder-Evol-Instruct-110K', 'argilla/distilabel-intel-orca-dpo-pairs', 'H-D-T/Buzz', 'theblackcat102/evol-codealpaca-v1', 'animelover/danbooru2022', 'CohereForAI/aya_collection', 'allenai/soda', 'lvwerra/stack-exchange-paired', 'teknium/GPT4-LLM-Cleaned', 'BelleGroup/train_1M_CN', 'allenai/peS2o', 'vivym/midjourney-messages', 'oscar-corpus/OSCAR-2301', 'taesiri/arxiv_qa', 'unalignment/toxic-dpo-v0.1', 'math-ai/AutoMathText', 'mozilla-foundation/common_voice_13_0', 'nampdn-ai/tiny-textbooks', 'ise-uiuc/Magicoder-OSS-Instruct-75K', 'legacy-datasets/common_voice', 'armanc/scientific_papers', 'mlabonne/guanaco-llama2-1k', 'DIBT/10k_prompts_ranked', 'UCSD26/medical_dialog', 'nomic-ai/gpt4all_prompt_generations', 'google-research-datasets/go_emotions', 'iamtarun/python_code_instructions_18k_alpaca', 'argilla/dpo-mix-7k', 'MBZUAI/LaMini-instruction', 'qiaojin/PubMedQA', 'LinkSoul/instruction_merge_set', 'LooksJuicy/ruozhiba', 'pleisto/wikipedia-cn-20230720-filtered', 'kakaobrain/coyo-700m', 'gaia-benchmark/GAIA', 'PleIAs/Post-OCR-Correction', 'fancyzhx/ag_news', 'cognitivecomputations/WizardLM_alpaca_evol_instruct_70k_unfiltered', 
'BelleGroup/train_3.5M_CN', 'togethercomputer/Long-Data-Collections', 'derek-thomas/ScienceQA', 'HuggingFaceM4/OBELICS', 'abacusai/SystemChat', 'google/MusicCaps', 'dell-research-harvard/AmericanStories', 'shahules786/orca-chat', 'li2017dailydialog/daily_dialog', 'cognitivecomputations/samantha-data', 'allenai/MADLAD-400', 'pixparse/idl-wds', 'eriktks/conll2003', 'oscar-corpus/OSCAR-2201', 'BelleGroup/multiturn_chat_0.8M', 'knowrohit07/know_sql', 'bigscience/xP3', 'mosaicml/dolly_hhrlhf', 'nvidia/ChatQA-Training-Data', 'zzliang/GRIT', 'cardiffnlp/tweet_eval', 'togethercomputer/RedPajama-Data-1T-Sample', 'izumi-lab/llm-japanese-dataset', 'TigerResearch/pretrain_zh', 'Dahoas/rm-static', 'HuggingFaceH4/stack-exchange-preferences', 'hakurei/open-instruct-v1', 'liuhaotian/LLaVA-Pretrain', 'MMInstruction/M3IT', 'lmsys/toxic-chat', 'openslr/librispeech_asr', 'codeparrot/apps', 'BelleGroup/train_2M_CN', 'laion/gpt4v-dataset', 'jondurbin/truthy-dpo-v0.1', 'argilla/ultrafeedback-binarized-preferences-cleaned', 'google-research-datasets/mbpp', 'xlangai/spider', 'Helsinki-NLP/opus-100', 'openlifescienceai/medmcqa', 'BelleGroup/train_0.5M_CN', 'defunct-datasets/amazon_reviews_multi', 'JeanKaddour/minipile', 'michaelwzhu/ChatMed_Consult_Dataset', 'MBZUAI/Bactrian-X', 'allenai/prosocial-dialog', 'csebuetnlp/xlsum', 'silk-road/Wizard-LM-Chinese-instruct-evol', 'allenai/WildChat', 'migtissera/Synthia-v1.3', 'MarkrAI/KoCommercial-Dataset', 'allenai/nllb', 'prometheus-eval/Feedback-Collection', 'TIGER-Lab/MMLU-Pro', 'codeparrot/github-code-clean', 'zhengyun21/PMC-Patients', 'ikala/tmmluplus', 'hendrycks/competition_math', 'espnet/yodas', 'm-a-p/CodeFeedback-Filtered-Instruction', 'LDJnr/Puffin', 'epfl-llm/guidelines', 'maywell/korean_textbooks', 'sentence-transformers/embedding-training-data', 'huggan/wikiart', 'Chinese-Vicuna/guanaco_belle_merge_v1.0', 'fnlp/moss-002-sft-data', 'openbmb/UltraInteract_sft', 'allenai/ai2_arc', 'deepmind/code_contests', 'succinctly/midjourney-prompts', 'AI4Math/MathVista', 'satellogic/EarthView', 'pixparse/pdfa-eng-wds', 'BelleGroup/school_math_0.25M', 'kaist-ai/CoT-Collection', 'allenai/objaverse-xl', 'Salesforce/wikisql', 'zeroshot/twitter-financial-news-sentiment', 'mozilla-foundation/common_voice_17_0', 'openbmb/UltraInteract_pair', 'microsoft/ms_marco', 'unimelb-nlp/wikiann', 'google/xtreme', 'osunlp/Mind2Web', 'yys/OpenOrca-Chinese', 'unalignment/toxic-dpo-v0.2', 'nampdn-ai/tiny-strange-textbooks', 'facebook/empathetic_dialogues', 'philschmid/sharegpt-raw', 'X2FD/LVIS-Instruct4V', 'deepmind/math_dataset', 'sunzeyeah/chinese_chatgpt_corpus', 'wanng/midjourney-v5-202304-clean', 'ybisk/piqa', 'IlyaGusev/gpt_roleplay_realm', 'cognitivecomputations/Dolphin-2.9', 'allenai/sciq', 'camel-ai/math', 'liuhaotian/LLaVA-CC3M-Pretrain-595K', 'silk-road/alpaca-data-gpt4-chinese', 'facebook/belebele', 'open-phi/textbooks', 'SciPhi/AgentSearch-V1', 'ylecun/mnist', 'Yelp/yelp_review_full', 'facebook/winoground', 'lmsys/mt_bench_human_judgments', 'shibing624/sharegpt_gpt4', 'gbharti/finance-alpaca', 'allenai/tulu-v2-sft-mixture', 'andersonbcdefg/synthetic_retrieval_tasks', 'Sao10K/Claude-3-Opus-Instruct-15K', 'm-a-p/Matrix', 'ncbi/pubmed', 'monology/pile-uncopyrighted', 'Open-Orca/SlimOrca-Dedup', 'medalpaca/medical_meadow_medqa', 'zxbsmk/webnovel_cn', 'BI55/MedText', 'Rowan/hellaswag', 'PKU-Alignment/PKU-SafeRLHF', 'rubend18/ChatGPT-Jailbreak-Prompts', 'flytech/python-codes-25k', 'hollyyfc/tidytuesday_for_python', 'shibing624/alpaca-zh', 'THUDM/LongBench', 
'glaiveai/glaive-code-assistant', 'keivalya/MedQuad-MedicalQnADataset', 'arxiv-community/arxiv_dataset', 'nyu-mll/multi_nli', 'kunishou/databricks-dolly-15k-ja', 'lemonilia/LimaRP', 'allenai/math_qa', 'stanfordnlp/sst2', 'EleutherAI/the_pile_deduplicated', 'HuggingFaceH4/CodeAlpaca_20K', 'pankajmathur/WizardLM_Orca', 'glaiveai/glaive-function-calling', 'LDJnr/Pure-Dove', 'vikhyatk/lnqa', 'hiyouga/DPO-En-Zh-20k', 'yfszzx/inspiration', 'Dahoas/full-hh-rlhf', 'codefuse-ai/Evol-instruction-66k', 'ZenMoore/RoleBench', 'speechcolab/gigaspeech', 'neural-bridge/rag-dataset-12000', 'defunct-datasets/amazon_us_reviews', 'wikimedia/wikisource', 'THUDM/humaneval-x', 'liyucheng/zhihu_rlhf_3k', 'PatronusAI/financebench', 'EdinburghNLP/xsum', 'unicamp-dl/mmarco', '0xJustin/Dungeons-and-Diffusion', 'tiange/Cap3D', 'NumbersStation/NSText2SQL', 'b3x0m/Chinese-H-Novels', 'hotpotqa/hotpot_qa', 'YeungNLP/moss-003-sft-data', 'osunlp/MagicBrush', 'Yukang/LongAlpaca-12k', 'math-ai/StackMathQA', 'PolyAI/minds14', 'FreedomIntelligence/HuatuoGPT-sft-data-v1', 'nlpai-lab/kullm-v2', 'ai4privacy/pii-masking-200k', 'argilla/OpenHermes2.5-dpo-binarized-alpha', 'ArmelR/stack-exchange-instruction', 'argilla/distilabel-math-preference-dpo', 'allenai/openbookqa', 'facebook/voxpopuli', 'IlyaGusev/ru_turbo_alpaca', 'griffin/chain_of_density', 'jondurbin/gutenberg-dpo-v0.1', 'PleIAs/French-PD-Newspapers', 'ParlAI/blended_skill_talk', 'mandarjoshi/trivia_qa', 'ranjaykrishna/visual_genome', 'JanosAudran/financial-reports-sec', 'fnlp/moss-003-sft-data', 'approximatelabs/tablib-v1-full', 'mozilla-foundation/common_voice_16_0', 'xai-org/RealworldQA', 'lmsys/lmsys-arena-human-preference-55k', 'Abirate/english_quotes', 'BelleGroup/generated_chat_0.4M', 'maharshipandya/spotify-tracks-dataset', 'TokenBender/code_instructions_122k_alpaca_style', 'Flmc/DISC-Med-SFT', 'ShengbinYue/DISC-Law-SFT', 'argilla/ultrafeedback-binarized-preferences', 'alexfabbri/multi_news', 'nguha/legalbench', 'Squish42/bluemoon-fandom-1-1-rp-cleaned', 'gorilla-llm/APIBench', 'OpenAssistant/oasst_top1_2023-08-25', 'joujiboi/japanese-anime-speech', 'BAAI/CCI-Data', 'google-research-datasets/conceptual_captions', 'selfrag/selfrag_train_data', 'MLCommons/peoples_speech', 'laion/laion-coco', 'gamino/wiki_medical_terms', 'yitingxie/rlhf-reward-datasets', 'PKU-Alignment/PKU-SafeRLHF-10K', 'graelo/wikipedia', 'bitext/Bitext-customer-support-llm-chatbot-training-dataset', 'AdaptLLM/finance-tasks', 'XzJosh/audiodataset', 'BAAI/TACO', 'nvidia/ChatRAG-Bench', 'google/boolq', 'kdexd/red_caps', 'ccdv/pubmed-summarization', 'ctheodoris/Genecorpus-30M', 'Cohere/wikipedia-22-12-en-embeddings', 'tasksource/bigbench', 'junelee/sharegpt_deepl_ko', 'elyza/ELYZA-tasks-100', 'codefuse-ai/CodeExercise-Python-27k', 'FreedomIntelligence/ALLaVA-4V', 'NilanE/ParallelFiction-Ja_En-100k', 'facebook/multilingual_librispeech', 'ms903/sovits4.0-768vec-layer12', 'CohereForAI/xP3x', 'princeton-nlp/SWE-bench', 'allenai/ultrafeedback_binarized_cleaned', 'sujet-ai/Sujet-Finance-Instruct-177k', 'tau/commonsense_qa', 'ccdv/arxiv-summarization', 'AmazonScience/massive', 'ShapeNet/ShapeNetCore', 'bigbio/med_qa', 'Cohere/wikipedia-22-12-simple-embeddings', 'lukaemon/mmlu', 'bigcode/humanevalpack', 'ArtifactAI/arxiv-math-instruct-50k', 'dikw/hh_rlhf_cn', 'ethz/food101', 'allenai/qasper', 'stanfordnlp/snli', 'Helsinki-NLP/tatoeba_mt', 'laion/laion-high-resolution', 'facebook/flores', 'reazon-research/reazonspeech', 'swype/instruct', 'athirdpath/DPO_Pairs-Roleplay-Alpaca-NSFW', 
'cognitivecomputations/dolphin-coder', 'McGill-NLP/WebLINX', 'sarvamai/samvaad-hi-v1', 'froggeric/creativity', '0-hero/Matter-0.1', 'NortheasternUniversity/big_patent', 'statmt/cc100', 'jhu-clsp/jfleg', 'neulab/conala', 'jmhessel/newyorker_caption_contest', 'HuggingFace-CN-community/translation', 'bigcode/commitpack', 'akoksal/LongForm', 'JourneyDB/JourneyDB', 'OpenGVLab/InternVid', 'heliosbrahma/mental_health_chatbot_dataset', 'reciTAL/mlsum', 'google/xtreme_s', 'Linaqruf/pixiv-niji-journey', 'THUDM/webglm-qa', 'starmpcc/Asclepius-Synthetic-Clinical-Notes', 'fondant-ai/fondant-cc-25m', 'jondurbin/airoboros-3.1', 'wenge-research/yayi2_pretrain_data', 'TuringsSolutions/NYTWritingStyleGuide', 'KBlueLeaf/danbooru2023-sqlite', 'xx103/NYC_Motor_Vehicle_Collisions_and_Weather_Dataset', 'bigcode/self-oss-instruct-sc2-exec-filter-50k', 'google-research-datasets/natural_questions', 'Helsinki-NLP/open_subtitles', 'Dahoas/synthetic-instruct-gptj-pairwise', 'open-llm-leaderboard/results', 'teknium/trismegistus-project', 'ro-h/regulatory_comments', 'ibrahimhamamci/CT-RATE', 'ruslanmv/ai-medical-chatbot', 'defunct-datasets/eli5', 'cimec/lambada', 'PhilipMay/stsb_multi_mt', 'GEM/wiki_lingua', 'euirim/goodwiki', 'laion/220k-GPT4Vision-captions-from-LIVIS', 'sc890/DEEPFRUlT_DATASET', 'Replete-AI/code_bagel', 'uoft-cs/cifar10', 'curaihealth/medical_questions_pairs', 'codeparrot/codeparrot-clean', 'google/bigbench', 'camel-ai/physics', 'bigcode/commitpackft', 'silk-road/ChatHaruhi-54K-Role-Playing-Dialogue', 'clouditera/security-paper-datasets', 'openerotica/freedom-rp', 'Major-TOM/Core-S2L2A', 'vblagoje/cc_news', 'facebook/kilt_tasks', 'deepmind/pg19', 'allenai/winogrande', 'aharley/rvl_cdip', 'naver-clova-ix/cord-v2', 'jamescalam/unsplash-25k-photos', 'jkhedri/psychology-dataset', 'grammarly/coedit', 'Duxiaoman-DI/FinCorpus', 'a686d380/h-corpus-2023', 'teknium/dataforge-economics', 'jondurbin/cinematika-v0.1', 'mlabonne/chatml_dpo_pairs', 'hieunguyenminh/roleplay', 'xz56/react-llama', 'TeraflopAI/Caselaw_Access_Project', 'coastalcph/lex_glue', 'cornell-movie-review-data/rotten_tomatoes', 'community-datasets/yahoo_answers_topics', 'miracl/miracl', 'humarin/chatgpt-paraphrases', 'junelee/wizard_vicuna_70k', 'csitfun/LogiCoT', 'haonan-li/cmmlu', 'shahules786/orca-best', 'yuvalkirstain/pickapic_v2', 'mozilla-foundation/common_voice_16_1', 'Locutusque/UltraTextbooks-2.0', 'm-a-p/MAP-CC', 'google/code_x_glue_ct_code_to_text', 'kmfoda/booksum', 'hoskinson-center/proof-pile', 'kaiokendev/SuperCOT-dataset', 'tatsu-lab/alpaca_eval', 'kwaikeg/KAgentInstruct', 'MaziyarPanahi/WizardLM_evol_instruct_V2_196k', 'facebook/xnli', 'Muennighoff/flan', 'qwedsacf/grade-school-math-instructions', 'rickRossie/bluemoon_roleplay_chat_data_300k_messages', 'codeparrot/self-instruct-starcoder', 'umarbutler/open-australian-legal-corpus', 'teleprint-me/phi-1', 'google/dreambooth', 'LDJnr/LessWrong-Amplify-Instruct', 'ro-h/regulatory_comments_api', 'Severian/Internal-Knowledge-Map', 'lamini/earnings-calls-qa', 'LanguageBind/Open-Sora-Plan-v1.0.0', 'stanfordnlp/coqa', 'allenai/ropes', 'ought/raft', 'transformersbook/codeparrot', 'nateraw/parti-prompts', 'allenai/real-toxicity-prompts', 'Muennighoff/natural-instructions', 'argilla/databricks-dolly-15k-curated-multilingual', 'alpindale/visual-novels', 'Norquinal/claude_multiround_chat_30k', 'yentinglin/TaiwanChat', 'qgyd2021/chinese_ner_sft', 'LDJnr/Verified-Camel', 'WenhaoWang/VidProM', 'bigcode/the-stack-v2-dedup', 'Cohere/wikipedia-2023-11-embed-multilingual-v3-int8-binary', 
'internlm/Agent-FLAN', 'isidentical/moondream2-coyo-5M-captions', 'zalando-datasets/fashion_mnist', 'shibing624/nli_zh', 'Monash-University/monash_tsf', 'camel-ai/ai_society', 'michaelwzhu/ShenNong_TCM_Dataset', 'linhtran92/viet_bud500', 'Clinton/Text-to-sql-v1', 'glaiveai/glaive-code-assistant-v2', 'llmware/rag_instruct_benchmark_tester', 'jovianzm/Pexels-400k', 'WhiteRabbitNeo/WRN-Chapter-1', 'Locutusque/function-calling-chatml', 'ShimizuYuki/Marvel_network', 'clips/mqa', 'toxigen/toxigen-data', 'joelniklaus/Multi_Legal_Pile', 'miracl/miracl-corpus', 'alespalla/chatbot_instruction_prompts', 'teknium/GPTeacher-General-Instruct', 'jondurbin/airoboros-gpt4-1.4.1', 'VMware/open-instruct', 'allenai/reward-bench', 'davanstrien/haiku_dpo', 'klue/klue', 'ncbi/ncbi_disease', 'esdurmus/wiki_lingua', 'wikimedia/wit_base', 'shunk031/JGLUE', 'llm-wizard/alpaca-gpt4-data-zh', 'Vision-CAIR/cc_sbu_align', 'pharaouk/dharma-1', 'jondurbin/airoboros-2.2.1', 'Vezora/Tested-22k-Python-Alpaca', 'HAERAE-HUB/KMMLU', 'MMInstruction/ArxivCap', 'jondurbin/py-dpo-v0.1', 'PleIAs/French-PD-Books', 'CohereForAI/aya_evaluation_suite', 'CohereForAI/aya_collection_language_split', 'ClusterlabAi/101_billion_arabic_words_dataset', 'google/imageinwords', 'fancyzhx/amazon_polarity', 'ehovy/race', 'oscar-corpus/OSCAR-2109', 'zh-plus/tiny-imagenet', 'MoritzLaurer/multilingual-NLI-26lang-2mil7', 'tyqiangz/multilingual-sentiments', 'detection-datasets/fashionpedia', 'EleutherAI/lambada_openai', 'Anthropic/model-written-evals', 'ds4sd/DocLayNet', 'Zellic/smart-contract-fiesta', 'FreedomIntelligence/huatuo_encyclopedia_qa', 'Chinese-Vicuna/instruct_chat_50k.jsonl', 'Trelis/function_calling_extended', 'FreedomIntelligence/Evol-Instruct-Chinese-GPT4', 'Anthropic/discrim-eval', 'nlpie/Llama2-MedTuned-Instructions', 'PixArt-alpha/SAM-LLaVA-Captions10M', 'AkitoP/Hscene-Speech', 'facebook/mlqa', 'webis/tldr-17', 'CogComp/trec', 'biglam/europeana_newspapers', 'pacovaldez/stackoverflow-questions', 'TigerResearch/sft_zh', 'zjunlp/Mol-Instructions', 'pufanyi/MIMICIT', 'BAAI/JudgeLM-100K', 'Trelis/function_calling_v3', 'google/Synthetic-Persona-Chat', 'FarReelAILab/Machine_Mindset_MBTI_dataset', 'jtatman/stable-diffusion-prompts-stats-full-uncensored', 'KBlueLeaf/danbooru2023-webp-4Mpixel', 'THUDM/LongAlign-10k', 'LeoZhangzaolin/Graptoloidea-Specimens-Imaging', 'ResplendentAI/NSFW_RP_Format_DPO', 'RekaAI/VibeEval', 'tomg-group-umd/cinepile', 'legacy-datasets/banking77', 'rmyeid/polyglot_ner', 'community-datasets/tapaco', 'deepset/germanquad', 'laion/laion2B-multi', 'huggan/smithsonian_butterflies_subset', 'CShorten/ML-ArXiv-Papers', 'codeparrot/xlcost-text-to-code', 'lukaemon/bbh', 'thu-coai/Safety-Prompts', 'IDEA-CCNL/Ziya-Eval-Chinese', 'cognitivecomputations/WizardLM_evol_instruct_V2_196k_unfiltered_merged_split', 'beyond/rlhf-reward-single-round-trans_chinese', 'jerryjalapeno/nart-100k-synthetic', 'vikp/pypi_clean', 'cognitivecomputations/ultrachat-uncensored', 'facebook/emu_edit_test_set', 'playgroundai/MJHQ-30K', 'zwn22/NC_Crime', 'Shitao/MLDR', 'Sayali9141/traffic_signal_images', 'deutsche-telekom/Ger-RAG-eval', 'FiscalNote/billsum', 'clue/clue', 'theatticusproject/cuad-qa', 'Helsinki-NLP/opus_books', 'SLPL/naab', 'Cohere/wikipedia-22-12', 'MohamedRashad/ChatGPT-prompts', 'HuggingFace-CN-community/Diffusion-book-cn', 'HuggingFaceH4/instruction-dataset', 'deepset/prompt-injections', 'OpenLeecher/Teatime', 'math-eval/TAL-SCQ5K', 'HackerNoon/tech-company-news-data-dump', 'LLM360/AmberDatasets', 'peiyi9979/Math-Shepherd', 
'Crystalcareai/MoD', 'papluca/language-identification', 'bigcode/the-stack-smol', 'argilla/news-summary', 'CarperAI/openai_summarize_comparisons', 'argilla/databricks-dolly-15k-curated-en', 'mikex86/stackoverflow-posts', 'Anthropic/llm_global_opinions', 'akjindal53244/Arithmo-Data', 'OpenLLM-France/Claire-Dialogue-French-0.1', 'arbml/CIDAR', 'snorkelai/Snorkel-Mistral-PairRM-DPO-Dataset', 'PleIAs/US-PD-Newspapers', 'yh0701/FracAtlas_dataset', 'somosnlp/Reglamento_Aeronautico_Colombiano_2024GemmaQA', 'omi-health/medical-dialogue-to-soap-summary', 'argilla/Capybara-Preferences', 'UCLNLP/adversarial_qa', 'convai-challenge/conv_ai_2', 'ccdv/govreport-summarization', 'mozilla-foundation/common_voice_8_0', 'nomic-ai/gpt4all_prompt_generations_with_p3', 'hugfaceguy0001/retarded_bar', 'lksy/ru_instruct_gpt4', 'Linly-AI/Chinese-pretraining-dataset', 'mosaicml/instruct-v3', 'corbt/all-recipes', 'VatsaDev/TinyText', 'google/docci', 'linux-cn/archive', 'Johnnyeee/Yelpdata_663', 'HuggingFaceTB/cosmopedia-100k', 'nyu-mll/blimp', 'defunct-datasets/bookcorpusopen', 'IWSLT/iwslt2017', 'mbien/recipe_nlg', 'Helsinki-NLP/tatoeba', 'GEM/viggo', 'bavard/personachat_truecased', 'segments/sidewalk-semantic', 'PolyAI/banking77', 'facebook/pmd', 'zeroshot/twitter-financial-news-topic', 'nuprl/MultiPL-E', 'GBaker/MedQA-USMLE-4-options', 'camel-ai/code', 'merve/turkish_instructions', 'tasksource/oasst1_pairwise_rlhf_reward', 'winddude/reddit_finance_43_250k', 'tiedong/goat', 'togethercomputer/RedPajama-Data-Instruct', 'DKYoon/SlimPajama-6B', 'Maxx0/sexting-nsfw-adultconten', 'squarelike/OpenOrca-gugugo-ko', 'MMInstruction/VLFeedback', 'LLaVA-VL/llava-plus-data', 'McAuley-Lab/Amazon-Reviews-2023', 'Open-Orca/1million-gpt-4', 'gwenxin/pills_inside_bottles', 'keithito/lj_speech', 'ontonotes/conll2012_ontonotesv5', 'mwritescode/slither-audited-smart-contracts', 'bsmock/pubtables-1m', 'tasksource/mmlu', 'bigcode/bigcode-pii-dataset', 'medalpaca/medical_meadow_wikidoc', 'P01son/instructions', 'ArtifactAI/arxiv-physics-instruct-tune-30k', 'mrtoy/mobile-ui-design', 'nampdn-ai/tiny-orca-textbooks', 'kyujinpy/KOpen-platypus', 'YeungNLP/firefly-pretrain-dataset', 'unalignment/airoboros-2.2', 'totally-not-an-llm/EverythingLM-data-V3', 'CASIA-LM/ChineseWebText', 'NeuralNovel/Neural-DPO', 'AI4Math/MathVerse', 'ucinlp/drop', 'Harvard/gigaword', 'CUHK-CSE/wider_face', 'microsoft/wiki_qa', 'HUPD/hupd', 'liweili/c4_200m', 'nielsr/funsd-layoutlmv3', 'IDEA-CCNL/laion2B-multi-chinese-subset', 'dennlinger/eur-lex-sum', 'mitclinicalml/clinical-ie', 'Matthijs/cmu-arctic-xvectors', 'FredZhang7/stable-diffusion-prompts-2.47M', 'philschmid/flanv2', 'NTU-NLP-sg/xCodeEval', 'MadVoyager/stable_diffusion_instructional_dataset', 'zetavg/ShareGPT-Processed', 'shibing624/nli-zh-all', 'oscar-corpus/colossal-oscar-1.0', 'greengerong/leetcode', 'ProgramComputer/voxceleb', 'allenai/paloma', 'jondurbin/airoboros-3.2', 'facebook/anli', 'ibm/duorc', 'GEM/gem', 'peluz/lener_br', 'Helsinki-NLP/news_commentary', 'google-research-datasets/paws-x', 'clips/mfaq', 'skytnt/anime-segmentation', 'alkzar90/NIH-Chest-X-ray-dataset', 'olm/wikipedia', 'jamescalam/youtube-transcriptions', 'shjwudp/chinese-c4', 'eloukas/edgar-corpus', 'reasoning-machines/gsm-hard', 'merve/my_notes', 'timbrooks/instructpix2pix-clip-filtered', 'liswei/rm-static-zhTW', 'llm-wizard/alpaca-gpt4-data', 'camel-ai/chemistry', 'THUDM/ImageRewardDB', 'rewoo/planner_instruction_tuning_2k', 'OpenLeecher/GPT4-10k', 'breadlicker45/bread-midi-dataset', 
'Tarklanse/Traditional_Chinese_roleplay_chat_Dataset', 'jat-project/jat-dataset', 'lavita/ChatDoctor-HealthCareMagic-100k', 'wuliangfo/Chinese-Pixiv-Novel', 'knowrohit07/know_medical_dialogue_v2', 'hackaprompt/hackaprompt-dataset', 'maywell/ko_wikidata_QA', 'swechatelangana/chandamama-kathalu', 'Idavidrein/gpqa', 'HuggingFaceH4/deita-10k-v0-sft', 'm-a-p/CMMMU', 'dcayton/nba_tracking_data_15_16', 'kunishou/J-ResearchCorpus', 'FreedomIntelligence/ApolloCorpus', 'lightblue/tagengo-gpt4', 'jojo0217/korean_safe_conversation', 'hfl/ruozhiba_gpt4_turbo', 'deepmind/narrativeqa', 'RussianNLP/russian_super_glue', 'google/speech_commands', 'karpathy/tiny_shakespeare', 'facebook/wiki_dpr', 'skt/kobest_v1', 'laion/laion-art', 'gigant/oldbookillustrations', 'ontocord/OIG-moderation', 'cryscan/multilingual-share', 'roneneldan/TinyStoriesInstruct', 'hltcoe/megawika', 'Aeala/ShareGPT_Vicuna_unfiltered', '64bits/lima_vicuna_format', 'nampdn-ai/tiny-webtext', 'BAAI/COIG-PC-Lite', 'LinkSoul/Chinese-LLaVA-Vision-Instructions', 'AdaptLLM/medicine-tasks', 'MBZUAI/VideoInstruct-100K', 'jondurbin/contextual-dpo-v0.1', 'matlok/multimodal-python-copilot-training-overview', 'bai-roleplay/evol-character-200', 'cathw/reddit_climate_comment', 'wenbopan/Chinese-dpo-pairs', 'AI-Lab-Makerere/beans', 'indonlp/indonlu', 'coastalcph/multi_eurlex', 's3prl/superb', 'universal-dependencies/universal_dependencies', 'Babelscape/wikineural', 'pmc/open_access', 'winvoker/turkish-sentiment-analysis-dataset', 'edinburghcstr/ami', 'Erythrocyte/Genshin_Datasets', 'bigcode/the-stack-github-issues', 'shibing624/CSC', 'mattmdjaga/human_parsing_dataset', 'camel-ai/biology', 'hssd/hssd-hab', 'PKU-Alignment/BeaverTails', 'rhasspy/piper-checkpoints', 'visheratin/laion-coco-nllb', 'iamtarun/code_instructions_120k_alpaca', 'rombodawg/LosslessMegaCodeTrainingV3_1.6m_Evol', 'vivym/midjourney-prompts', 'qgyd2021/few_shot_intent_sft', 'QuyenAnhDE/Diseases_Symptoms', 'ajibawa-2023/Python-Code-23k-ShareGPT', 'm-a-p/COIG-Kun', 'CausalLM/GPT-4-Self-Instruct-German', 'shareAI/novelai3', 'MinervaAI/Aesir-Preview', 'wintercoming6/artwork_for_sdxl', 'Salesforce/lotsa_data', 'ForzaJuve1/UEFA_Euro_2020_Data', 'mo-mittal/reddit_political_subs', 'Targoman/TLPC', 'google-research-datasets/paws', 'Stanford/web_questions', 'bigscience-data/roots_zh-cn_wikipedia', 'laion/laion2B-en-aesthetic', 'daekeun-ml/naver-news-summarization-ko', 'CarperAI/openai_summarize_tldr', 'competitions/aiornot', 'huggingface/badges', 'allenai/lila', 'yuvalkirstain/pickapic_v1', 'tatsu-lab/alpaca_farm', 'cognitivecomputations/open-instruct-uncensored', 'CheshireAI/guanaco-unchained', 'openchat/openchat_sharegpt_v3', 'LinkSoul/LLaSM-Audio-Instructions', 'totally-not-an-llm/EverythingLM-data-V2', 'jinaai/code_exercises', '0-hero/prompt-perfect', 'jamescalam/ai-arxiv-chunked', 'maywell/ko_Ultrafeedback_binarized', 'keirp/hungarian_national_hs_finals_exam', 'laion/laion-pop', 'gvecchio/MatSynth', 'baobab-trees/wikipedia-human-retrieval-ja', 'mii-llm/gazzetta-ufficiale', 'shachardon/ShareLM', 'MohamedRashad/midjourney-detailed-prompts', 'ade-benchmark-corpus/ade_corpus_v2', 'uoft-cs/cifar100', 'mhardalov/exams', 'josecannete/large_spanish_corpus', 'allenai/quac', 'microsoft/xglue', 'huggingface/documentation-images', 'seamew/ChnSentiCorp', 'tau/scrolls', 'bible-nlp/biblenlp-corpus', 'JulesBelveze/tldr_news', 'christopher/rosetta-code', 'inria-soda/tabular-benchmark', 'beyond/chinese_clean_passages_80m', 'bigbio/pubmed_qa', 'Cohere/miracl-zh-queries-22-12', 'koutch/stackoverflow_python', 
'ACCA225/Kaggle-Stable-Diffusion', 'Yasbok/Alpaca_arabic_instruct', 'bertin-project/alpaca-spanish', 'laion/laion400m', 'axiong/pmc_oa', 'medalpaca/medical_meadow_medical_flashcards', 'dominguesm/Canarim-Instruct-PTBR-Dataset', 'p1atdev/niji-v5', 'zetavg/coct-en-zh-tw-translations-twp-300k', 'skeskinen/TinyStories-GPT4', 'xmcmic/PMC-VQA', 'beomi/KoAlpaca-v1.1a', 'ecnu-icalk/educhat-sft-002-data-osm', 'kyujinpy/OpenOrca-KO', 'open-phi/programming_books_llama', 'hkust-nlp/deita-10k-v0', 'jxu124/OpenX-Embodiment', 'm-a-p/MusicPile', 'ajibawa-2023/Code-290k-ShareGPT', 'bai-roleplay/evol-character-entire', 'minhanhto09/NuCLS_dataset', 'cl-nagoya/auto-wiki-qa', 'speechbrain/common_language', 'ucirvine/sms_spam', 'Babelscape/rebel-dataset', 'cfilt/iitb-english-hindi', 'gfissore/arxiv-abstracts-2021', 'mozilla-foundation/common_voice_7_0', 'sil-ai/bloom-lm', 'kensho/spgispeech', 'bigscience/xP3all', 'llm-wizard/dolly-15k-instruction-alpaca-format', 'liyucheng/zhihu_26k', 'tarungupta83/MidJourney_v5_Prompt_dataset', 'jondurbin/airoboros-uncensored', 'llm-blender/mix-instruct', 'UmaDiffusion/ULTIMA', 'BAAI/SVIT', 'AdiOO7/llama-2-finance', 'togethercomputer/llama-instruct', 'kingbri/PIPPA-shareGPT', 'Minami-su/roleplay_multiturn_chat_1k_zh_v0.1', 'Illia56/Military-Aircraft-Detection', 'cis-lmu/Glot500', 'facebook/emu_edit_test_set_generations', 'Yukang/LongAlpaca-16k-length', 'THUDM/CogVLM-SFT-311K', 'qnguyen3/llava-fn-calling', 'Locutusque/hercules-v2.0', 'HathawayLiu/housing_dataset', 'bigcode/the-stack-v2-train-full-ids', 'YXu120/NC_Education', 'motherduckdb/duckdb-text2sql-25k', 'Wenetspeech4TTS/WenetSpeech4TTS', 'naklecha/minecraft-question-answer-700k', 'HannahRoseKirk/prism-alignment', 'halabi2016/arabic_speech_corpus', 'allenai/common_gen', 'ImperialCollegeLondon/health_fact', 'pfb30/multi_woz_v22', 'nfL6/yahoo_answers_qa', 'MLCommons/ml_spoken_words', 'ucberkeley-dlab/measuring-hate-speech', 'bigscience/xP3mt', 'sayakpaul/nyu_depth_v2', 'argilla/medical-domain', 'nlphuji/flickr30k', 'aadityaubhat/GPT-wiki-intro', 'nbertagnolli/counsel-chat', 'theblackcat102/codex-math-qa', 'RyokoAI/Syosetu711K', 'emre/stanford-alpaca-cleaned-turkish-translated', 'somosnlp-hackathon-2023/Habilidades_Agente_v1', 'recastai/LAION-art-EN-improved-captions', 'FreedomIntelligence/huatuo_knowledge_graph_qa', 'FreedomIntelligence/ShareGPT-CN', 'Mutonix/RefGPT-Fact', 'nlpai-lab/databricks-dolly-15k-ko', 'TempoFunk/webvid-10M', 'shinonomelab/cleanvid-15m_map', 'smangrul/code-chat-assistant-v1', 'OleehyO/latex-formulas', 'daat/DATA', 'axiong/pmc_llama_instructions', 'AdaptLLM/law-tasks', 'chargoddard/rpguild', 'AiresPucrs/stanford-encyclopedia-philosophy', 'amaai-lab/MusicBench', 'diffusers/pokemon-gpt4-captions', 'migtissera/Tess-Coder-v1.0', 'HaoyeZhang/RLHF-V-Dataset', 'togethercomputer/glaive-function-calling-v2-formatted', 'osunlp/TravelPlanner', 'BioMistral/BioInstructQA', 'misikoff/zillow', 'MedRAG/pubmed', 'Writer/omniact', 'openbmb/UltraSafety', 'visheratin/realworldqa', 'lorinma/ChineseEncyclopedia', 'sealuzh/app_reviews', 'levow/msra_ner', 'openslr/openslr', 'INK-USC/riddle_sense', 'zhoubolei/scene_parse_150', 'allenai/scitldr', 'google-research-datasets/tydiqa', 'IlyaGusev/gazeta', 'albertvillanova/legal_contracts', 'google-research-datasets/conceptual_12m', 'facebook/textvqa', 'VIMA/VIMA-Data', 'hanamizuki-ai/genshin-voice-v3.3-mandarin', 'Nerfgun3/sakimi-chan_LoRA', 'cyberagent/crello', 'jxm/the_office_lines', 'WynterJones/chatgpt-roles', 'gbharti/wealth-alpaca_lora', 'THUIR/T2Ranking', 
'IlyaGusev/ru_turbo_saiga', 'tasksource/ScienceQA_text_only', 'cvssp/WavCaps', 'lighteval/MATH', 'kunishou/oasst1-89k-ja', 'zetavg/zh-tw-wikipedia', 'lighteval/legal_summarization', 'skeskinen/TinyStories-hf', 'silk-road/chinese-dolly-15k', 'TigerResearch/tigerbot-zhihu-zh-10k', 'open-llm-leaderboard/requests', 'mlabonne/guanaco-llama2', 'totally-not-an-llm/EverythingLM-data', 'BELLE-2/train_3.5M_CN_With_Category', 'rizerphe/glaive-function-calling-v2-llama', 'rombodawg/LimitlessMegaCodeTraining', 're-align/just-eval-instruct', 'IlyaGusev/pippa_scored', 'IGNF/FLAIR', 'allenai/WildChat-nontoxic', 'Unbabel/TowerBlocks-v0.1', 'ShoukanLabs/AniSpeech', 'unsloth/notebooks', 'GAIR/MathPile_Commercial', 'abacusai/MetaMathFewshot', 'DiscoResearch/germanrag', 'cdoswald/SPIDER', 'yixuantt/MultiHopRAG', 'instructkr/ko_elo_arena_0207', 'osunlp/SMolInstruct', 'allenai/WildBench', 'FuseAI/FuseChat-Mixture', 'Vezora/Tested-143k-Python-Alpaca', 'microsoft/cats_vs_dogs', 'tdavidson/hate_speech_offensive', 'SNOW-NLP/snow_simplified_japanese_corpus', 'timit-asr/timit_asr', 'webnlg-challenge/web_nlg', 'michaelauli/wiki_bio', 'kili-technology/plastic_in_river', 'qanastek/MASSIVE', 'google/wit', 'sil-ai/bloom-speech', 'FacePerceiver/laion-face', 'codeparrot/codecomplex', 'codeparrot/github-jupyter-code-to-text', 'neuralworm/stable-diffusion-discord-prompts', 'detection-datasets/coco', 'Gxg/Math23K', 'ashraq/fashion-product-images-small', 'animelover/genshin-impact-images', 'suolyer/webqa', 'fusing/fill50k', 'dominguesm/alpaca-data-pt-br', 'multimodalart/facesyntheticsspigacaptioned', 'jiacheng-ye/logiqa-zh', 'sam-mosaic/vicuna_alpaca_hc3_chatml', 'thefcraft/civitai-stable-diffusion-337k', 'Nan-Do/instructional_code-search-net-python', 'izumi-lab/llm-japanese-dataset-vanilla', 'xmj2002/Chinese_modern_classical', 'cognitivecomputations/based', 'laion/strategic_game_chess', 'jondurbin/airoboros-gpt4-1.2', 'jondurbin/airoboros-gpt4-m2.0', 'rombodawg/LosslessMegaCodeTrainingV2', 'shareAI/CodeChat', 'qgyd2021/h_novel', 'BAAI/COIG-PC-core', 'Duxiaoman-DI/FinanceIQ', 'Unified-Language-Model-Alignment/Anthropic_HH_Golden', 'osunlp/TableInstruct', 'CollectiveCognition/chats-data-2023-10-16', 'hypervariance/function-calling-sharegpt', 'google/reveal', 'corbyrosset/researchy_questions', 'Locutusque/Hercules-v3.0', 'jmc255/aphantasia_drawing_dataset', 'sayhan/strix-philosophy-qa', 'fnlp/AnyInstruct', 'NousResearch/json-mode-eval', 'XintongHe/Stomatal_Images_Datasets', 'abacusai/MetaMath_DPO_FewShot', 'coseal/CodeUltraFeedback', 'BAAI/CCI2-Data', 'Astris/LA-Times', 'H-D-T/RLSTACK', 'deepmind/aqua_rat', 'abuelkhair-corpus/arabic_billion_words', 'google/code_x_glue_tc_text_to_code', 'McGill-NLP/medal', 'IWSLT/mt_eng_vietnamese', 'quora-competitions/quora', 'CSTR-Edinburgh/vctk', 'wmt/wmt19', 'dalle-mini/YFCC100M_OpenAI_subset', 'merve/poetry', 'yhavinga/ccmatrix', 'silver/personal_dialog', 'embedding-data/sentence-compression', 'mozilla-foundation/common_voice_10_0', 'm1guelpf/nouns', 'Fazzie/Teyvat', 'daspartho/stable-diffusion-prompts', 'cardiffnlp/tweet_sentiment_multilingual', 'PublicPrompts/Karsh', 'MCG-NJU/MultiSports', 'Dahoas/static-hh', 'CarperAI/pilev2-dev', 'shibing624/AdvertiseGen', 'andersonbcdefg/supernatural-instructions-2m', 'azcorpus/azcorpus_v0', 'cognitivecomputations/oa_leet10k', 'Abrumu/Fashion_controlnet_dataset_V3', 'tasksource/tasksource-instruct-v0', 'wenge-research/yayi_domain_subset', 'ignmilton/ign_clean_instruct_dataset_500k', 'changpt/ko-lima-vicuna', 'pankajmathur/alpaca_orca', 
'marhensa/comfyui-workflow', 'jondurbin/airoboros-2.1', 'M-A-D/Mixed-Arabic-Datasets-Repo', 'taide/TAIDE-14-tasks', 'manu/project_gutenberg', 'Lakera/gandalf_ignore_instructions', 'goendalf666/sales-conversations', 'yuyijiong/Multi-Doc-QA-Chinese', 'fnlp/character-llm-data', 'wenge-research/yayi_uie_sft_data', 'glaiveai/glaive-code-assistant-v3', 'davidchan/anim400k', 'prometheus-eval/Preference-Collection', 'numind/NuNER', 'YuxuanZhang888/ColonCancerCTDataset', 'TIGER-Lab/SKGInstruct', 'CyberNative/Code_Vulnerability_Security_DPO', 'hiyouga/glaive-function-calling-v2-sharegpt', 'ai4bharat/sangraha', 'ontocord/viet4all', 'cloneofsimo/imagenet.int8', 'Replete-AI/code_bagel_hermes-2.5', 'amirveyseh/acronym_identification', 'cornell-movie-dialog/cornell_movie_dialog', 'fancyzhx/dbpedia_14', 'esnli/esnli', 'fever/fever', 'google/jigsaw_toxicity_pred', 'google/xquad', 'NbAiLab/NCC', 'ccdv/cnn_dailymail', 'ccdv/patent-classification', 'DFKI-SLT/few-nerd', 'solomonk/reddit_mental_health_posts', 'carolina-c4ai/corpus-carolina', 'thu-coai/lccc', 'fabiochiu/medium-articles', 'FinanceInc/auditor_sentiment', 'nateraw/midjourney-texttoimage-new', 'HuggingFaceH4/self-instruct-seed', 'RyokoAI/CNNovel125K', 'IndianaUniversityDatasetsModels/MIMIC-medical-report', 'samhog/psychology-10k', 'HuggingFaceH4/databricks_dolly_15k', 'heegyu/open-korean-instructions', 'logo-wizard/modern-logo-dataset', 'sam-mosaic/hhrlhf_evol_chatml', '4eJIoBek/PAIT-Downloads', 'kunishou/hh-rlhf-49k-ja', 'fblgit/tree-of-knowledge', 'TigerResearch/tigerbot-law-plugin', 'kaist-ai/Multilingual-CoT-Collection', 'mcipriano/stackoverflow-kubernetes-questions', 'jondurbin/airoboros-gpt4-1.4', 'SALT-NLP/LLaVAR', 'declare-lab/flan-mini', 'jondurbin/airoboros-gpt4-2.0', 'seungheondoh/LP-MusicCaps-MSD', 'AILab-CVC/SEED-Bench', 'zjunlp/InstructIE', 'nisaar/LLAMA2_Legal_Dataset_4.4k_Instructions', 'nampdn-ai/tiny-lessons', 'Healthy13/Text2SQL', 'MBZUAI-LLM/SlimPajama-627B-DC', 'a686d380/sis-novel', 'fedml/PubMedQA_instruction', 'meta-math/MetaMathQA-40K', 'PocketDoc/Choose-Your-Story-Long-Text-Adventures', 'SinKove/synthetic_mammography_csaw', 'unalignment/spicy-3.1', 'locuslab/TOFU', 'OpenGVLab/VideoChat2-IT', 'LLM360/CrystalCoderDatasets', 'argilla/ultrafeedback-curated', 'HuggingFaceH4/grok-conversation-harmless', 'HuggingFaceH4/OpenHermes-2.5-1k-longest', 'Ziyuan111/DurhamTrees', '2A2I/Arabic-OpenHermes-2.5', 'Locutusque/arc-cot', 'osunlp/Multimodal-Mind2Web', 'rc9494/SP500_Date_Offset', 'EleutherAI/lichess-puzzles', 'conceptnet5/conceptnet5', 'allenai/cosmos_qa', 'thunlp/docred', 'facebook/md_gender_bias', 'apple/mkqa', 'iastate/onestop_english', 'KorQuAD/squad_kor_v1', 'allenai/swag', 'tweets-hate-speech-detection/tweets_hate_speech_detection', 'wmt/wmt16', 'ChristophSchuhmann/MS_COCO_2017_URL_TEXT', 'SetFit/emotion', 'ai4bharat/samanantar', 'ccdv/arxiv-classification', 'mteb/tweet_sentiment_extraction', 'beki/privy', 'zoheb/sketch-scene', 'WINGNUS/ACL-OCL', 'haor/pixiv_month_top50', 'HuggingFaceM4/COCO', 'haor/pixiv-yandere', 'Plachta/Umamusume-voice-text-pairs', 'keremberke/chest-xray-classification', 'keremberke/table-extraction', 'silatus/1k_Website_Screenshots_and_Metadata', 'IlyaGusev/habr', 'KrakExilios/koreandoll', 'pmoe7/SP_500_Stocks_Data-ratios_news_price_10_yrs', 'potsawee/wiki_bio_gpt3_hallucination', 'RyokoAI/Fandom23K', 'Bingsu/ko_alpaca_data', 'medalpaca/medical_meadow_wikidoc_patient_information', 'Papersnake/people_daily_news', 'FreedomIntelligence/phoenix-sft-data-v1', 'howard-hou/OCR-VQA', 
'silk-road/Vanilla-chinese-alpaca-luotuo', 'danielv835/personal_finance_v0.2', 'silk-road/Luotuo-QA-A-CoQA-Chinese', 'gretelai/symptom_to_diagnosis', 'agkphysics/AudioSet', 'YeungNLP/ultrachat', 'Iess/chinese_modern_poetry', 'wendlerc/RenderedText', 'Oasis-Team/Oasis-Corpus', 'qgyd2021/chinese_chitchat', 'MattCoddity/dockerNLcommands', 'yuyijiong/Long-Instruction', 'Skywork/ChineseDomainModelingEval', 'xinrongzhang2022/InfiniteBench', 'MohamedRashad/multilingual-tts', 'silk-road/ChatHaruhi-Expand-118K', 'Luckyjhg/Geo170K', 'andersonbcdefg/synthetic_tuples_gpt35_turbo', 'Rtian/DebugBench', 'euclaise/reddit-instruct', 'Locutusque/hercules-v1.0', 'mastergopote44/Long-Term-Care-Aggregated-Data', 'ontocord/CulturaY', 'Qdrant/dbpedia-entities-openai3-text-embedding-3-large-3072-1M', 'mlabonne/chatml-OpenHermes2.5-dpo-binarized-alpha', 'jg583/NSynth', 'storytracer/LoC-PD-Books', 'zhongshsh/CLoT-Oogiri-GO', 'davidkim205/kollm-converations', 'Locutusque/hercules-v4.0', 'tdiggelm/climate_fever', 'hfl/cmrc2018', 'mrqa-workshop/mrqa', 'google-research-datasets/nq_open', 'kyunghyuncho/search_qa', 'IWSLT/ted_talks_iwslt', 'ubuntu-dialogs-corpus/ubuntu_dialogs_corpus', 'SetFit/enron_spam', 'gsarti/flores_101', 'vblagoje/lfqa', 'huggan/pokemon', 'joelniklaus/lextreme', 'OxAISH-AL-LLM/wiki_toxic', 'tomasg25/scientific_lay_summarisation', 'svjack/pokemon-blip-captions-en-zh', 'lambdalabs/naruto-blip-captions', 'shunk031/wrime', 'marmal88/skin_cancer', 'IlyaGusev/rulm', 'datadrivenscience/ship-detection', 'Junity/UmaMusume-TokaiTeio-Dataset', 'Den4ikAI/russian_dialogues', 'LinhDuong/chatdoctor-200k', 'Nebulous/gpt4all_pruned', 'camel-ai/ai_society_translated', 'alpindale/light-novels', 'iamketan25/roleplay-instructions-dataset', 'VMware/open-instruct-v1-oasst-dolly-hhrlhf', 'Nan-Do/code-search-net-python', 'ShoukanLabs/OpenNiji-Dataset', 'Birchlabs/openai-prm800k-stepwise-critic', 'Norquinal/claude_evol_instruct_210k', 'mlfoundations/datacomp_1b', 'tasksource/icl-symbol-tuning-instruct', 'findnitai/english-to-hinglish', 'pankajmathur/dolly-v2_orca', 'sudy-super/dialogsum-ja', 'sayakpaul/hf-codegen-v2', 'FreedomIntelligence/CMB', 'jamescalam/llama-2-arxiv-papers-chunked', 'smangrul/hf-stack-v1', 'abacusai/LongChat-Lines', 'PetraAI/PetraAI', 'sinarashidi/alpaca-persian', 'neural-bridge/rag-hallucination-dataset-1000', 'google/trueteacher', 'twang2218/chinese-law-and-regulations', 'Loie/Auto-ACD', 'CollectiveCognition/chats-data-2023-09-22', 'CollectiveCognition/chats-data-2023-09-27', 'a686d380/h-eval', 'guangyil/laion-coco-aesthetic', 'ajibawa-2023/Code-74k-ShareGPT', 'ChuckMcSneed/NeoEvalPlusN_benchmark', 'matsuxr/JaGovFaqs-22k', 'NobodyExistsOnTheInternet/ToxicQAFinal', 'jondurbin/bagel-v0.3', 'allenai/preference-test-sets', 'xingyaoww/code-act', 'moukaii/Tuberculosis_Dataset', 'abacusai/ARC_DPO_FewShot', 'tinyBenchmarks/tinyMMLU', 'HPLT/hplt_monolingual_v1_2', 'maywell/koVast', 'unicamp-dl/quati', 'YanweiLi/MGM-Instruction', 'BLINK-Benchmark/BLINK', 'abacusai/SystemChat-1.1', 'DLI-Lab/pearl', 'Vi-VLM/Vista', 'microsoft/crd3', 'odegiber/hate_speech18', 'Helsinki-NLP/kde4', 'kuznetsoffandrey/sberquad', 'McGill-NLP/stereoset', 'unimorph/universal_morphologies', 'uclanlp/wino_bias', 'CAiRE/ASCEND', 'huggingface/label-files', 'laion/laion5B-index', 'vicenteor/sbu_captions', 'McGill-NLP/FaithDial', 'LIUM/tedlium', 'AlekseyKorshuk/persona-chat', 'allenai/multi_lexsum', 'DeveloperOats/DBPedia_Classes', 'shailja/Verilog_GitHub', 'akariasai/PopQA', 'deepghs/game_characters', 'nlphuji/whoops', 
'FredZhang7/anime-prompts-180K', 'HuggingFaceH4/instruct_me', 'mozilla-foundation/common_voice_12_0', 'LangChainDatasets/agent-search-calculator', 'jamescalam/langchain-docs', 'cognitivecomputations/leet10k-alpaca', 'Babelscape/multinerd', 'kz-transformers/multidomain-kazakh-dataset', 'LLMs/Alpaca-ShareGPT', 'milashkaarshif/MoeGirlPedia_wikitext_raw_archive', 'jainr3/diffusiondb-pixelart', 'tau/zero_scrolls', 'MU-NLPC/Calc-ape210k', 'dbdu/ShareGPT-74k-ko', 'bavest/fin-llama-dataset', 'TigerResearch/tigerbot-kaggle-leetcodesolutions-en-2k', 'Slep/LAION-RVS-Fashion', 'flaviagiammarino/vqa-rad', 'L4NLP/LEval', 'sudy-super/CoTangent', 'newsletter/SDXL-Artists', 'liuhaotian/llava-bench-in-the-wild', 'mlabonne/CodeLlama-2-20k', 'lamini/lamini_docs', 'marmikpandya/mental-health', 'ibm-nasa-geospatial/multi-temporal-crop-classification', 'Universal-NER/Pile-NER-type', 'm720/SHADR', 'nampdn-ai/tiny-math-textbooks', 'squarelike/ko_medical_chat', 'declare-lab/HarmfulQA', 'OpenDriveLab/DriveLM', 'neovalle/H4rmony', 'vibhorag101/phr_mental_therapy_dataset', 'Vision-Flan/vision-flan_191-task_1k', 'ahmed-masry/ChartQA', 'ProlificAI/social-reasoning-rlhf', 'BAAI/DataOptim', 'Heralax/Augmental-Dataset', 'LLM-Tuning-Safety/HEx-PHI', 'kwaikeg/KAgentBench', 'SeaLLMs/Sea-bench', 'athirdpath/DPO_Pairs-Roleplay-Alpaca-NSFW-v1-SHUFFLED', 'yale-nlp/FOLIO', 'RealTimeData/bbc_news_alltime', 'HuggingFaceH4/orca_dpo_pairs', 'NebulaeWis/gelbooru_images', 'llm-blender/Unified-Feedback', 'grimulkan/LimaRP-augmented', 'cyberagent/chatbot-arena-ja-calm2-7b-chat-experimental', 'ehristoforu/midjourney-images', 'Jiwonny29/project1', 'Major-TOM/Core-S2L1C', 'gorilla-llm/Berkeley-Function-Calling-Leaderboard', 'julep-ai/openai-community-posts', 'SALT-NLP/Design2Code', 'Locutusque/OpenCerebrum-SFT', 'm-a-p/CodeEditorBench', 'chansung/merged_ds_coding', 'spectrallabs/credit-scoring-training-dataset', 'shareAI/DPO-zh-en-emoji', 'rqq/GLM-4-Instruct-4K-zh', 'Helsinki-NLP/bible_para', 'UFRGS/brwac', 'ZihanWangKi/conllpp', 'facebook/covost2', 'dvilares/head_qa', 'facebook/lama', 'yaolu/multi_x_science_sum', 'ptb-text-only/ptb_text_only', 'allenai/social_bias_frames', 'stanfordnlp/sst', 'defunct-datasets/the_pile_openwebtext2', 'google/wiki40b', 'google-research-datasets/wiki_atomic_edits', 'botisan-ai/cantonese-mandarin-translations', 'nlpaueb/finer-139', 'Stanford/wikitablequestions', 'silver/lccc', 'facebook/content_rephrasing', 'Twitter/TwitterFollowGraph', 'Nerfgun3/wlop_style', 'TheFusion21/PokemonCards', 'jeanlee/kmhas_korean_hate_speech', 'sander-wood/irishman', 'tobiolatunji/afrispeech-200', 'swaption2009/20k-en-zh-translation-pinyin-hsk', 'danielshemesh/midjourney', 'Elfsong/ClinicalDataset', 'Den4ikAI/russian_instructions', 'paulofinardi/OIG_small_chip2_portuguese_brasil', 'acheong08/nsfw_reddit', 'VISION-Workshop/VISION-Datasets', 'P1ayer-1/chatgpt-conversations-chatlogs.net', 'wavpub/JinJinLeDao_QA_Dataset', 'lang-uk/every_prompt', 'pki/SecurityGPT', 'zjkarina/matreshka', 'deepghs/nsfw_detect', 'JasperLS/prompt-injections', 'ccmusic-database/music_genre', 'jondurbin/airoboros-gpt4', 'TigerResearch/pretrain_en', 'mit-han-lab/awq-model-zoo', 'Nan-Do/reason_code-search-net-python', 'saldra/sakura_japanese_dataset', 'explodinggradients/fiqa', '64bits/lex_fridman_podcast_for_llm_vicuna', 'KShivendu/dbpedia-entities-openai-1M', 'Glavin001/startup-interviews', 'FredZhang7/toxi-text-3M', 'joonhok-exo-ai/korean_law_open_data_precedents', 'UmaDiffusion/ULTIMA-prompts', 'ArtifactAI/arxiv_python_research_code', 
'NebulaByte/E-Commerce_Customer_Support_Conversations', 'HuggingFaceM4/LLaVAR-Instruct-16K', 'Locutusque/InstructMix', 'shahules786/Multi-chapter-summaries', 'ai4privacy/pii-masking-65k', 'Universal-NER/Pile-NER-definition', 'jojo0217/korean_rlhf_dataset', 'kernelmachine/open-license-corpus', 'Xilabs/PIPPA-alpaca', 'Suprit/CMtMedQA', 'ticoAg/Chinese-medical-dialogue', 'Yirany/UniMM-Chat', 'xuqinyang/BaiduBaike-5.63M', 'jamescalam/agent-conversations-retrieval-tool', 'zhiqings/LLaVA-Human-Preference-10K', 'qgyd2021/rlhf_reward_dataset', 'gathnex/Gath_baize', 'a686d380/h-corpus-raw', 'flytech/llama-python-codes-30k', 'open-phi/ft-sample-mistral', 'hkust-nlp/deita-6k-v0', 'Doctor-Shotgun/no-robots-sharegpt', 'styletts2-community/multilingual-phonemes-10k-alpha', 'imone/OpenOrca_FLAN', 'osv5m/osv5m', 'multimodalart/steamboat-willy-frames', 'irlab-udc/metahate', 'grimulkan/theory-of-mind', 'ai4bharat/indic-instruct-data-v0.1', 'kobprof/skolegpt-instruct', 'Ejafa/ye-pop', 'steamcyclone/Pill_Ideologies-Post_Titles', 'euclaise/reddit-instruct-curated', 'VatsaDev/animebench-alpha', '0-hero/prompt-perfect-dpo', 'MedRAG/textbooks', 'TIGER-Lab/Mantis-Instruct', 'ChuckMcSneed/various_RP_system_prompts', 'chenmingxuan/Chinese-Patent-Summary', 'cassiekang/cub200_dataset', 'antiven0m/catboros-3.2-dpo', 'ai4privacy/pii-masking-300k', 'multilingual/orca_dpo_pairs', 'BigAction/the-wave-clean', 'legacy-datasets/ami', 'TheBritishLibrary/blbooks', 'convai-challenge/conv_ai_3', 'tuetschek/e2e_nlg', 'iamollas/ethos', 'Helsinki-NLP/europarl', 'nanyang-technological-university-singapore/hkcancor', 'ucsbnlp/liar', 'Maluuba/newsqa', 'SemEvalWorkshop/sem_eval_2018_task_1', 'rcds/swiss_judgment_prediction', 'JAugusto97/told-br', 'leondz/wnut_17', 'CodedotAI/code_clippy_github', 'castorini/mr-tydi', 'flax-sentence-embeddings/stackexchange_math_jsonl', 'jfrenz/legalglue', 'ml6team/cnn_dailymail_nl', 'sentence-transformers/parallel-sentences', 'sentence-transformers/reddit-title-body', 'stas/openwebtext-10k', 'Azu/Handwritten-Mathematical-Expression-Convert-LaTeX', 'patriziobellan/PET', 'mozilla-foundation/common_voice_9_0', 'bloomberg/entsum', 'carblacac/twitter-sentiment-analysis', 'HuggingFaceM4/VQAv2', 'LHF/escorpius', 'owaiskha9654/PubMed_MultiLabel_Text_Classification_Dataset_MeSH', 'masakhane/mafand', 'Muennighoff/P3', 'Dahoas/instruct-synthetic-prompt-responses', 'mjw/stock_market_tweets', 'Korakoe/NijiJourney-Prompt-Pairs', 'mrm8488/unnatural-instructions-full', 'yuvalkirstain/PickaPic', 'keremberke/blood-cell-object-detection', 'keremberke/license-plate-object-detection', 'forta/malicious-smart-contract-dataset', 'ChristophSchuhmann/essays-with-instructions', 'HuggingFaceH4/helpful-instructions', 'nanaaaa/emotion_chinese_english', 'wbbbbb/pclue', 'lansinuote/ChnSentiCorp', 'katanaml-org/invoices-donut-data-v1', 'mxeval/mbxp', 'somosnlp/somos-clean-alpaca-es', 'amaydle/npc-dialogue', 'KK04/LogicInference_OA', 'rajuptvs/ecommerce_products_clip', 'hanamizuki-ai/genshin-voice-v3.5-mandarin', 'sukaka/novelai-webui', 'icybee/share_gpt_90k_v1', 'michelleyunun/therapydata', 'jaydenccc/AI_Storyteller_Dataset', 'atasoglu/databricks-dolly-15k-tr', 'PaulAdversarial/all_news_finance_sm_1h2023', 'juletxara/mgsm', 'FreedomIntelligence/huatuo26M-testdatasets', 'mio/sukasuka-anime-vocal-dataset', 'causalnlp/corr2cause', 'tabtoyou/KoLLaVA-Instruct-150k', 'ibm-nasa-geospatial/hls_burn_scars', 'hkust-nlp/felm', 'nisaar/Lawyer_GPT_India', 'mrzlab630/trading-candles', 'ai4privacy/pii-masking-43k', 'burkelibbey/colors', 
'SiberiaSoft/SiberianPersonaChat', 'abacusai/WikiQA-Free_Form_QA', 'LibrAI/do-not-answer', 'nampdn-ai/mini-CoT-Collection', 'nampdn-ai/devdocs.io', 'TokenBender/roleplay_alpaca', 'bupt/LawDataset-BUPT', 'jondurbin/airoboros-2.2', 'apf1/datafilteringnetworks_2b', '04RR/tiny-instruct', 'emozilla/yarn-train-tokenized-16k-mistral', 'FreedomIntelligence/Huatuo26M-Lite', 'Hypersniper/riddles_v1', 'q-future/Q-Instruct-DB', 'ai-forever/MERA', 'THUDM/BPO', 'echo840/Detailed_Caption', 'glnmario/news-qa-summarization', 'TriadParty/deepsex-RP', 'pixparse/cc3m-wds', 'Minami-su/Anime_novel_datasets', 'Gourieff/ReActor', 'cognitivecomputations/Code-74k-ShareGPT-Vicuna', 'dataautogpt3/Dalle3', 'DL3DV/DL3DV-Benchmark', 'CausalLM/GPT-4-Self-Instruct-Turkish', 'sablo/oasst2_curated', 'STEM-AI-mtl/Electrical-engineering', 'ikawrakow/imatrix-from-wiki-train', 'somewheresystems/dataclysm-arxiv', 'fblgit/simple-math', 'fblgit/simple-math-DPO', 'acon96/Home-Assistant-Requests', 'Query-of-CC/Knowledge_Pile', 'OpenDatasets/dalle-3-dataset', 'ptx0/photo-concept-bucket', 'zjunlp/iepile', 'BatsResearch/ctga-v1', 'MMInstruction/ArxivQA', 'hotchpotch/JQaRA', 'sean0042/KorMedMCQA', 'p1atdev/ichikara-instruction', 'maywell/LogicKor', 'davanstrien/dataset-tldr', 'xcodemind/vision2ui', 'lawinstruct/lawinstruct', 'UCSC-VLAA/HQ-Edit', 'kigner/ruozhiba-llama3-tt', 'H-D-T/Select-Stack', 'mutiyama/alt', 'iabufarha/ar_sarcasm', 'nilc-nlp/assin2', 'cam-cst/cbt', 'NLP-AUEB/eurlex', 'facebook/kilt_wikipedia', 'legacy-datasets/multilingual_librispeech', 'ucirvine/reuters21578', 'stanfordnlp/sentiment140', 'ccasimiro/squad_es', 'defunct-datasets/the_pile_stack_exchange', 'facebook/wiki_movies', 'Fraser/python-state-changes', 'Hellisotherpeople/DebateSum', 'SocialGrep/one-million-reddit-jokes', 'blinoff/medical_qa_ru_data', 'huggingface/transformers-metadata', 'indonesian-nlp/id_newspapers_2018', 'openclimatefix/nimrod-uk-1km', 'sentence-transformers/msmarco-hard-negatives', 'nthngdy/oscar-small', 'jiangjiechen/ekar_chinese', 'sil-ai/bloom-captioning', 'orieg/elsevier-oa-cc-by', 'songweig/imagenet_sketch', 'sileod/movie_recommendation', 'google/quickdraw', 'huggingface-legal/takedown-notices', 'demelin/moral_stories', 'RUCAIBox/Chinese-Generation', 'Bingsu/zeroth-korean', 'shjwudp/shu', 'CarperAI/pile-v2-small-filtered', 'citeseerx/ACL-fig', 'keremberke/painting-style-classification', 'jordyvl/DUDE_loader', 'mlfoundations/datacomp_pools', 'Loie/VGGSound', 'artem9k/ai-text-detection-pile', 'HuggingFaceH4/hhh_alignment', 'hendrycks/ethics', 'IlyaGusev/pikabu', 'Aditya011/autotrain-data-nl-to-sql', 'sedthh/tv_dialogue', 'AnonymousSub/MedQuAD_Context_Question_Answer_Triples_TWO', 'instruction-tuning-sd/cartoonization', 'Polyglot-or-Not/Fact-Completion', 'llm-wizard/Product-Descriptions-and-Ads', 'emplocity/owca', 'FronkonGames/steam-games-dataset', 'lucasmccabe-lmi/codex_math_qa_alpaca_style', 'ms903/Diff-SVC-refactor-pre-trained-model', 'FourthBrainGenAI/AI-Superstar-Dataset', 'Maciel/FinCUGE-Instruction', 'HuggingFaceH4/code_evaluation_prompts', 'hoskinson-center/minif2f-lean4', 'Fsoft-AIC/the-vault-function', 'wangrongsheng/HealthCareMagic-100k-en', 'edarchimbaud/timeseries-1d-stocks', 'lighteval/mmlu', 'lucasmccabe-lmi/CodeAlpaca-20k', 'DavidVivancos/MindBigData2023_MNIST-8B', 'Meranti/CLAP_freesound', 'flaviagiammarino/path-vqa', 'projectlosangeles/Los-Angeles-MIDI-Dataset', 'Babelscape/SREDFM', 'Norquinal/claude_multi_instruct_1k', 'shumpei2525/fine_tuning521k-ja', 'pankajmathur/orca_minis_uncensored_dataset', 
'flozi00/conversations', 'InfImagine/FakeImageDataset', 'wyzelabs/RuleRecommendation', 'squarelike/sharegpt_deepl_ko_translation', 'gpt4life/alpaca_claud_filtered', 'pankajmathur/orca_mini_v1_dataset', 'nampdn-ai/tiny-bridgedict', 'cmcjas/SDXL_ComfyUI_workflows', 'rombodawg/MegaCodeTraining', 'morpheuslord/cve-llm-training', 'ymoslem/Law-StackExchange', 'krisfu/awesome-llm-datasets-only-Chinese', 'TaylorAI/pubmed_commercial', 'kyujinpy/KoCoT_2000', 'mychen76/invoices-and-receipts_ocr_v1', 'kunishou/amenokaku-code-instruct', 'approximatelabs/tablib-v1-sample', 'swj0419/WikiMIA', 'llmware/rag_instruct_test_dataset_0.1', 'rizerphe/glaive-function-calling-v2-zephyr', 'yuyijiong/Book_Summary_Chinese', 'winglian/no_robots_rlhf', 'castorini/wura', 'diffusers/benchmarks', 'nuprl/EditPackFT', 'craigwu/vstar_bench', 'Undi95/toxic-dpo-v0.1-sharegpt', 'kunishou/oasst2-135k-ja', 'ChuckMcSneed/WolframRavenwolfs_benchmark_results', 'CausalLM/GPT-4-Self-Instruct-Japanese', 'jtatman/stable-diffusion-prompts-uncensored', 'lowres/anime', 'MediaTek-Research/TCEval-v2', 'AGBonnet/augmented-clinical-notes', 'HuggingFaceH4/cai-conversation-harmless', 'lmms-lab/VQAv2', 'lmms-lab/DocVQA', 'Mutonix/RefGPT-Fact-v2', 'ba188/NHS_HES', 'ajibawa-2023/Children-Stories-Collection', 'Vikhrmodels/LLaVA-Instruct-ru', 'Doctor-Shotgun/theory-of-mind-dpo', 'divyasharma0795/AppleVisionPro_Tweets', 'TIGER-Lab/MATH-plus', 'cgato/SlimOrcaDedupCleaned', 'YanweiLi/MGM-Pretrain', 'HuggingFaceH4/llava-instruct-mix-vsft', 'fal-ai/imgsys-results', 'mzbac/function-calling-llama-3-format-v1.1', 'Yale-LILY/aeslc', 'google-research-datasets/aquamuse', 'allenai/atomic', 'CFPB/consumer-finance-complaints', 'rishitdagli/cppe-5', 'stanfordnlp/craigslist_bargains', 'illuin/fquad', 'google-research-datasets/google_wellformed_query', 'yavuzkomecoglu/interpress_news_category_tr_lite', 'thu-coai/kd_conv_with_kb', 'kakaobrain/kor_nli', 'ParaPat/para_pat', 'google-research-datasets/poem_sentiment', 'eusip/silicone', 'LSDSem/story_cloze', 'turkic-interlingua/turkic_xwmt', 'bea2019st/wi_locness', 'fancyzhx/yelp_polarity', 'CodedotAI/code_clippy', 'SetFit/sst5', 'deepset/germandpr', 'flax-sentence-embeddings/stackexchange_titlebody_best_and_down_voted_answer_jsonl', 'microsoft/codexglue_method_generation', 'nickmuchi/financial-classification', 'uitnlp/vietnamese_students_feedback', 'ydshieh/coco_dataset_script', 'cgarciae/cartoonset', 'DMetaSoul/chinese-semantic-textual-similarity', 'ukr-models/Ukr-Synth', 'Matthijs/snacks', 'csebuetnlp/CrossSum', 'Moo/korean-parallel-corpora', 'HuggingFaceM4/TGIF', 'khalidalt/tydiqa-goldp', 'mteb/amazon_reviews_multi', 'silver/mmchat', 'fmplaza/offendes', 'ColumbiaNLP/FLUTE', 'tner/ontonotes5', 'jordanparker6/publaynet', 'tarteel-ai/quranqa', 'OATML-Markslab/ProteinGym', 'google/cvss', 'RUCAIBox/Open-Dialogue', 'cardiffnlp/tweet_topic_multi', 'priyank-m/chinese_text_recognition', 'skytnt/fbanimehq', 'huggingface-projects/color-palettes-sd', 'heegyu/namuwiki', 'FremyCompany/BioLORD-Dataset', 'nikitam/ACES', 'nitrosocke/arcane-diffusion-dataset', 'Twitter/TwitterFaveGraph', 'ju-resplande/qa-pt', 'Short-Answer-Feedback/saf_communication_networks_english', 'hoskinson-center/proofnet', 'Erythrocyte/Diff-SVC_Genshin_Datasets', 'nyanko7/pixiv_top50', 'ashraf-ali/quran-data', 'Nerfgun3/splash_art', 'nelorth/oxford-flowers', 'laion/laion2b-en-vit-l-14-embeddings', 'lsy641/PsyQA', 'masakhane/masakhaner2', 'alexandreteles/mental-health-conversational-data', 'joelniklaus/legal_case_document_summarization', 
'Cohere/wikipedia-22-12-zh-embeddings', 'ruanchaves/hatebr', 'liyucheng/chinese_metaphor_dataset', 'pierreguillou/DocLayNet-large', 'range3/cc100-ja', 'Supermaxman/esa-hubble', 'Den4ikAI/russian_instructions_2', 'nlpcloud/instructions-dataset-adapted-from-stanford-alpaca-for-gpt-j', 'medalpaca/medical_meadow_mediqa', 'InstaDeepAI/multi_species_genomes', 'larryvrh/WikiMatrix-v1-Ja_Zh-filtered', 'IlyaGusev/ru_sharegpt_cleaned', 'LEAP/ClimSim_high-res', 'niizam/4chan-datasets', 'kunishou/databricks-dolly-69k-ja-en-translation', 'enryu43/twitter100m_tweets', 'heegyu/korquad-chat-v1', 'griffin/ChemSum', 'KakologArchives/KakologArchives', 'openllmplayground/pandagpt_visual_instruction_dataset', 'fujiki/japanese_alpaca_data', 'zhiqings/dromedary-65b-verbose-clone-v0', 'hammer888/interior_style_dataset', 'edarchimbaud/timeseries-1m-stocks', 'FremyCompany/AGCT-Dataset', 'project-sloth/captcha-images', 'jondurbin/rosettacode-raw', 'collabora/whisperspeech', 'microsoft/LCC_csharp', 'YeungNLP/school_math_0.25M', 'portuguese-benchmark-datasets/BLUEX', 'globis-university/aozorabunko-clean', 'totally-not-an-llm/sharegpt-hyperfiltered-3k', 'DAMO-NLP-MT/multialpaca', 'crumb/Wizard-EvolInstruct70k-k4', 'd0rj/OpenOrca-ru', 'jed351/Traditional-Chinese-Common-Crawl-Filtered', 'v2ray/jannie-log', 'abacusai/WikiQA-Altered_Numeric_QA', 'ChrisHayduk/Llama-2-SQL-Dataset', 'TempoFunk/hdvila-100M', 'tyang816/MedChatZH', 'Falah/image_generation_prompts_SDXL', 'turing-motors/LLaVA-Instruct-150K-JA', 'OpenAssistant/OASST-DE', 'jitx/Methods2Test_java_unit_test_code', 'llvm-ml/ComPile', 'BleachNick/MIC_full', 'bugdaryan/sql-create-context-instruction', 'harvard-lil/cold-cases', 'knowrohit07/ArithmeLogic', 'mikonvergence/LAION-EO', 'euclaise/writingprompts', 'erhwenkuo/medical_dialogue-chinese-zhtw', 'Nexusflow/NexusRaven_API_evaluation', 'jackhhao/jailbreak-classification', 'cmalaviya/expertqa', 'meta-math/GSM8K_Backward', 'jamescalam/ai-arxiv', 'yuyijiong/Long-instruction-en2zh', 'microsoft/kitab', 'MemGPT/MSC-Self-Instruct', 'AI-Secure/DecodingTrust', 'ShashiVish/cover-letter-dataset', 'umarigan/turkiye_finance_qa', 'allenai/scirepeval', 'tahrirchi/uz-books', 'yuyijiong/LongPaper_multitask', 'pseudolab/MedSi', 'lavita/medical-qa-datasets', 'vilm/OpenOrca-Viet', 'kyujinpy/KOR-OpenOrca-Platypus-v3', 'akemiH/NoteChat', 'openerotica/erotiquant', 'listen2you002/ChartLlama-Dataset', 'saillab/taco-datasets', 'nuprl/CanItEdit', 'kyujinpy/orca_math_dpo', 'adamkarvonen/chess_games', 'blancsw/oasst2_top1_chat_format', 'Awiny/Howto-Interlink7M', 'NobodyExistsOnTheInternet/ToxicDPOqa', 'VatsaDev/worldbuild', 'lorinma/NL2SQL_zh', 'mlabonne/chessllm', 'genggui001/gg_zh_v1_550B', 'DL3DV/DL3DV-ALL-4K', 'paraloq/json_data_extraction', 'tastypear/unalignment-toxic-dpo-v0.2-zh_cn', 'hpprc/jawiki', 'eduagarcia/LegalPT_dedup', 'christopherthompson81/quant_exploration', 'alvarobartt/dpo-mix-7k-simplified', 'ucekmez/OpenOrca-tr', 'ehristoforu/dalle-3-images', 'ivrit-ai/whisper-training', 'SPRIGHT-T2I/spright', 'coseal/CodeUltraFeedback_binarized', 'ParasiticRogue/Bluemoon-Light', 'wdndev/webnovel-chinese', 'jondurbin/bagel-v0.5', 'Lin-Chen/MMStar', 'tolgadev/turkish_73k_instruct_extended', 'Babelscape/ALERT_DPO', 'kigner/ruozhiba-llama3', 'davanstrien/dataset-tldr-preference-dpo', 'facebook/asset', 'barilan/blog_authorship_corpus', 'dataset-org/c3', 'clinc/clinc_oos', 'rexarski/eli5_category', 'mohnish/lc_quad', 'billion-word-benchmark/lm1b', 'ParaCrawl/para_crawl', 'crscardellino/spanish_billion_words', 'KorQuAD/squad_kor_v2', 
'nunorc/squad_v1_pt', 'cgpotts/swda', 'nakhun/thaisum', 'wmt/wmt14', 'SetFit/20_newsgroups', 'bertin-project/mc4-sampling', 'lbox/lbox_open', 'codeparrot/codeparrot-clean-train', 'thomwolf/github-python', 'Adapting/empathetic_dialogues_v2', 'Bingsu/Human_Action_Recognition', 'mustapha/QuranExe', 'ceyda/fashion-products-small', 'frgfm/imagenette', 'naver-clova-ix/synthdog-en', 'bigscience/evaluation-results', 'pcuenq/oxford-pets', 'SLPL/syntran-fa', 'RUCAIBox/Story-Generation', 'jonathanli/law-stack-exchange', 'ai-forever/school_notebooks_RU', 'ashraq/esc50', 'waifu-research-department/regularization', 'sbx/superlim-2', 'ashraq/financial-news', 'AluminiumOxide/personal_latent_diffusion', 'elenanereiss/german-ler', 'Nerfgun3/flower_style', 'lmqg/qa_harvesting_from_wikipedia', 'Nerfgun3/land_style', 'NeelNanda/counterfact-tracing', 'VietAI/vi_pubmed', 'andyyang/stable_diffusion_prompts_2m', 'its5Q/yandex-q', 'wanng/laion-high-resolution-chinese', 'Salesforce/rose', 'Jean-Baptiste/financial_news_sentiment', 'diltdicker/romance_novel_data-2022'} other_datasets = {'OpenCo7/UpVoteWeb'} enabled_datasets = top_2k_most_liked_datasets | other_datasets # File: dataset-viewer-main/services/worker/src/worker/job_runners/split/split_job_runner.py from pathlib import Path from libcommon.dtos import JobInfo from libcommon.exceptions import ParameterMissingError from worker.config import AppConfig from worker.job_runners._job_runner_with_cache import JobRunnerWithCache from worker.job_runners._job_runner_with_datasets_cache import JobRunnerWithDatasetsCache from worker.job_runners.config.config_job_runner import ConfigJobRunner from worker.utils import check_split_exists class SplitJobRunner(ConfigJobRunner): split: str def __init__(self, job_info: JobInfo, app_config: AppConfig) -> None: super().__init__(job_info=job_info, app_config=app_config) if job_info['params']['split'] is None: raise ParameterMissingError("'split' parameter is required") self.split = job_info['params']['split'] def validate(self) -> None: check_split_exists(dataset=self.dataset, config=self.config, split=self.split) class SplitJobRunnerWithDatasetsCache(JobRunnerWithDatasetsCache, SplitJobRunner): def __init__(self, job_info: JobInfo, app_config: AppConfig, hf_datasets_cache: Path) -> None: JobRunnerWithDatasetsCache.__init__(self, job_info=job_info, app_config=app_config, hf_datasets_cache=hf_datasets_cache) SplitJobRunner.__init__(self, job_info=job_info, app_config=app_config) class SplitJobRunnerWithCache(JobRunnerWithCache, SplitJobRunner): def __init__(self, job_info: JobInfo, app_config: AppConfig, cache_directory: Path) -> None: JobRunnerWithCache.__init__(self, job_info=job_info, app_config=app_config, cache_directory=cache_directory) SplitJobRunner.__init__(self, job_info=job_info, app_config=app_config) # File: dataset-viewer-main/services/worker/src/worker/loop.py import logging import random import time from dataclasses import dataclass from datetime import datetime from typing import Optional, TypedDict import orjson from filelock import FileLock from libcommon.dtos import JobInfo from libcommon.prometheus import LongStepProfiler, StepProfiler from libcommon.queue.jobs import AlreadyStartedJobError, EmptyQueueError, LockTimeoutError, NoWaitingJobError, Queue from libcommon.utils import get_datetime from psutil import cpu_count, getloadavg, swap_memory, virtual_memory from worker.config import AppConfig from worker.job_manager import JobManager from worker.job_runner_factory import BaseJobRunnerFactory class 
WorkerState(TypedDict): current_job_info: Optional[JobInfo] last_updated: datetime @dataclass class Loop: job_runner_factory: BaseJobRunnerFactory app_config: AppConfig state_file_path: str def __post_init__(self) -> None: self.queue = Queue() def has_memory(self) -> bool: if self.app_config.worker.max_memory_pct <= 0: return True virtual_memory_used = int(virtual_memory().used) virtual_memory_total = int(virtual_memory().total) percent = (swap_memory().used + virtual_memory_used) / (swap_memory().total + virtual_memory_total) ok = percent < self.app_config.worker.max_memory_pct if not ok: logging.info(f'memory usage (RAM + SWAP) is too high: {percent:.0f}% - max is {self.app_config.worker.max_memory_pct}%') return ok def has_cpu(self) -> bool: if self.app_config.worker.max_load_pct <= 0: return True load_pct = max(getloadavg()[:2]) / cpu_count() * 100 ok = load_pct < self.app_config.worker.max_load_pct if not ok: logging.info(f'cpu load is too high: {load_pct:.0f}% - max is {self.app_config.worker.max_load_pct}%') return ok def has_resources(self) -> bool: return self.has_memory() and self.has_cpu() def sleep(self) -> None: jitter = 0.75 + random.random() / 2 duration = self.app_config.worker.sleep_seconds * jitter logging.debug(f'sleep during {duration:.2f} seconds') time.sleep(duration) def run(self) -> None: logging.info('Worker loop started') try: while True: if self.has_resources() and self.process_next_job(): continue with StepProfiler('loop', 'sleep'): self.sleep() except BaseException as err: logging.exception(f'quit due to an uncaught error: {err}') raise def process_next_job(self) -> bool: logging.debug('try to process a job') with StepProfiler('loop', 'start_job'): try: job_info = self.queue.start_job(difficulty_min=self.app_config.worker.difficulty_min, difficulty_max=self.app_config.worker.difficulty_max) self.set_worker_state(current_job_info=job_info) logging.debug(f'job assigned: {job_info}') except (EmptyQueueError, AlreadyStartedJobError, LockTimeoutError, NoWaitingJobError) as e: self.set_worker_state(current_job_info=None) logging.debug(e) return False with LongStepProfiler('loop', 'run_job'): job_runner = self.job_runner_factory.create_job_runner(job_info) job_manager = JobManager(job_info=job_info, app_config=self.app_config, job_runner=job_runner) job_result = job_manager.run_job() with StepProfiler('loop', 'finish_job'): job_manager.finish(job_result=job_result) self.set_worker_state(current_job_info=None) return True def set_worker_state(self, current_job_info: Optional[JobInfo]) -> None: worker_state: WorkerState = {'current_job_info': current_job_info, 'last_updated': get_datetime()} with FileLock(f'{self.state_file_path}.lock'): with open(self.state_file_path, 'wb') as worker_state_f: worker_state_f.write(orjson.dumps(worker_state)) # File: dataset-viewer-main/services/worker/src/worker/main.py import os import tempfile from libcommon.log import init_logging from libcommon.resources import CacheMongoResource, QueueMongoResource from libcommon.storage import init_duckdb_index_cache_dir, init_parquet_metadata_dir, init_statistics_cache_dir from libcommon.storage_client import StorageClient from worker.config import AppConfig from worker.executor import WorkerExecutor from worker.job_runner_factory import JobRunnerFactory from worker.resources import LibrariesResource WORKER_STATE_FILE_NAME = 'worker_state.json' if __name__ == '__main__': with tempfile.TemporaryDirectory() as tmp_dir: state_file_path = os.path.join(tmp_dir, WORKER_STATE_FILE_NAME) 
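# Editor's note (descriptive comment, not part of the original source): the statements that
# follow complete the worker entrypoint in services/worker/src/worker/main.py. The temporary
# state file path computed above is exported via WORKER_STATE_FILE_PATH, the AppConfig is read
# from the environment, the parquet-metadata / duckdb-index / statistics cache directories and
# the assets StorageClient are initialized, the cache and queue Mongo connections are checked,
# and a WorkerExecutor is started with a JobRunnerFactory wired to those resources.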
os.environ['WORKER_STATE_FILE_PATH'] = state_file_path app_config = AppConfig.from_env() init_logging(level=app_config.log.level) parquet_metadata_directory = init_parquet_metadata_dir(directory=app_config.parquet_metadata.storage_directory) duckdb_index_cache_directory = init_duckdb_index_cache_dir(directory=app_config.duckdb_index.cache_directory) statistics_cache_directory = init_statistics_cache_dir(app_config.descriptive_statistics.cache_directory) storage_client = StorageClient(protocol=app_config.assets.storage_protocol, storage_root=app_config.assets.storage_root, base_url=app_config.assets.base_url, overwrite=True, s3_config=app_config.s3) with LibrariesResource(hf_endpoint=app_config.common.hf_endpoint, init_hf_datasets_cache=app_config.datasets_based.hf_datasets_cache, numba_path=app_config.numba.path) as libraries_resource, CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) as cache_resource, QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) as queue_resource: if not cache_resource.is_available(): raise RuntimeError('The connection to the cache database could not be established. Exiting.') if not queue_resource.is_available(): raise RuntimeError('The connection to the queue database could not be established. Exiting.') job_runner_factory = JobRunnerFactory(app_config=app_config, hf_datasets_cache=libraries_resource.hf_datasets_cache, parquet_metadata_directory=parquet_metadata_directory, duckdb_index_cache_directory=duckdb_index_cache_directory, statistics_cache_directory=statistics_cache_directory, storage_client=storage_client) worker_executor = WorkerExecutor(app_config=app_config, job_runner_factory=job_runner_factory, state_file_path=state_file_path) worker_executor.start() # File: dataset-viewer-main/services/worker/src/worker/resources.py from dataclasses import dataclass, field from pathlib import Path from typing import Optional import datasets from datasets.utils.logging import get_verbosity, log_levels, set_verbosity from libcommon.resources import Resource @dataclass class LibrariesResource(Resource): hf_endpoint: str init_hf_datasets_cache: Optional[str] = None numba_path: Optional[str] = None previous_hf_endpoint: str = field(init=False) previous_hf_update_download_counts: bool = field(init=False) previous_verbosity: int = field(init=False) hf_datasets_cache: Path = field(init=False) def allocate(self) -> None: self.hf_datasets_cache = datasets.config.HF_DATASETS_CACHE if self.init_hf_datasets_cache is None else Path(self.init_hf_datasets_cache) self.previous_hf_endpoint = datasets.config.HF_ENDPOINT datasets.config.HF_ENDPOINT = self.hf_endpoint self.previous_hf_update_download_counts = datasets.config.HF_UPDATE_DOWNLOAD_COUNTS datasets.config.HF_UPDATE_DOWNLOAD_COUNTS = False self.previous_verbosity = get_verbosity() set_verbosity(log_levels['critical']) def release(self) -> None: datasets.config.HF_ENDPOINT = self.previous_hf_endpoint datasets.config.HF_UPDATE_DOWNLOAD_COUNTS = self.previous_hf_update_download_counts set_verbosity(self.previous_verbosity) # File: dataset-viewer-main/services/worker/src/worker/routes/metrics.py import logging from collections.abc import Callable, Coroutine from typing import Any from libcommon.prometheus import Prometheus from prometheus_client import CONTENT_TYPE_LATEST from starlette.requests import Request from starlette.responses import Response Endpoint = Callable[[Request], Coroutine[Any, Any, Response]] def create_metrics_endpoint() -> 
Endpoint: prometheus = Prometheus() async def metrics_endpoint(_: Request) -> Response: logging.info('/metrics') return Response(prometheus.getLatestContent(), headers={'Content-Type': CONTENT_TYPE_LATEST}) return metrics_endpoint # File: dataset-viewer-main/services/worker/src/worker/start_web_app.py import uvicorn from starlette.applications import Starlette from starlette.routing import Route from worker.config import UvicornConfig from worker.routes.healthcheck import healthcheck_endpoint from worker.routes.metrics import create_metrics_endpoint def create_app() -> Starlette: return Starlette(routes=[Route('/healthcheck', endpoint=healthcheck_endpoint), Route('/metrics', endpoint=create_metrics_endpoint())]) if __name__ == '__main__': uvicorn_config = UvicornConfig.from_env() uvicorn.run('worker.start_web_app:create_app', host=uvicorn_config.hostname, port=uvicorn_config.port, factory=True, workers=uvicorn_config.num_workers) # File: dataset-viewer-main/services/worker/src/worker/start_worker_loop.py import sys from libcommon.log import init_logging from libcommon.resources import CacheMongoResource, QueueMongoResource from libcommon.storage import init_duckdb_index_cache_dir, init_parquet_metadata_dir, init_statistics_cache_dir from libcommon.storage_client import StorageClient from worker.config import AppConfig from worker.job_runner_factory import JobRunnerFactory from worker.loop import Loop from worker.resources import LibrariesResource if __name__ == '__main__': app_config = AppConfig.from_env() state_file_path = app_config.worker.state_file_path if '--print-worker-state-path' in sys.argv: print(state_file_path, flush=True) if not state_file_path: raise RuntimeError('The worker state file path is not set. Exiting.') init_logging(level=app_config.log.level) parquet_metadata_directory = init_parquet_metadata_dir(directory=app_config.parquet_metadata.storage_directory) duckdb_index_cache_directory = init_duckdb_index_cache_dir(directory=app_config.duckdb_index.cache_directory) statistics_cache_directory = init_statistics_cache_dir(app_config.descriptive_statistics.cache_directory) storage_client = StorageClient(protocol=app_config.assets.storage_protocol, storage_root=app_config.assets.storage_root, base_url=app_config.assets.base_url, overwrite=True, s3_config=app_config.s3) with LibrariesResource(hf_endpoint=app_config.common.hf_endpoint, init_hf_datasets_cache=app_config.datasets_based.hf_datasets_cache, numba_path=app_config.numba.path) as libraries_resource, CacheMongoResource(database=app_config.cache.mongo_database, host=app_config.cache.mongo_url) as cache_resource, QueueMongoResource(database=app_config.queue.mongo_database, host=app_config.queue.mongo_url) as queue_resource: if not cache_resource.is_available(): raise RuntimeError('The connection to the cache database could not be established. Exiting.') if not queue_resource.is_available(): raise RuntimeError('The connection to the queue database could not be established. 
Exiting.') job_runner_factory = JobRunnerFactory(app_config=app_config, hf_datasets_cache=libraries_resource.hf_datasets_cache, parquet_metadata_directory=parquet_metadata_directory, duckdb_index_cache_directory=duckdb_index_cache_directory, statistics_cache_directory=statistics_cache_directory, storage_client=storage_client) loop = Loop(job_runner_factory=job_runner_factory, state_file_path=state_file_path, app_config=app_config) loop.run() # File: dataset-viewer-main/services/worker/src/worker/statistics_utils.py import enum import io import logging from pathlib import Path from typing import Any, Callable, Optional, TypedDict, Union import librosa import numpy as np import polars as pl import pyarrow.parquet as pq from datasets import Features from libcommon.exceptions import StatisticsComputationError from PIL import Image from tqdm.contrib.concurrent import thread_map DECIMALS = 5 NUM_BINS = 10 MAX_PROPORTION_STRING_LABELS = 0.2 MAX_NUM_STRING_LABELS = 1000 NO_LABEL_VALUE = -1 INTEGER_DTYPES = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32', 'uint64'] FLOAT_DTYPES = ['float16', 'float32', 'float64'] NUMERICAL_DTYPES = INTEGER_DTYPES + FLOAT_DTYPES STRING_DTYPES = ['string', 'large_string'] class ColumnType(str, enum.Enum): FLOAT = 'float' INT = 'int' BOOL = 'bool' LIST = 'list' CLASS_LABEL = 'class_label' STRING_LABEL = 'string_label' STRING_TEXT = 'string_text' AUDIO = 'audio' IMAGE = 'image' class Histogram(TypedDict): hist: list[int] bin_edges: list[Union[int, float]] class NumericalStatisticsItem(TypedDict): nan_count: int nan_proportion: float min: Optional[float] max: Optional[float] mean: Optional[float] median: Optional[float] std: Optional[float] histogram: Optional[Histogram] class CategoricalStatisticsItem(TypedDict): nan_count: int nan_proportion: float no_label_count: int no_label_proportion: float n_unique: int frequencies: dict[str, int] class BoolStatisticsItem(TypedDict): nan_count: int nan_proportion: float frequencies: dict[str, int] SupportedStatistics = Union[NumericalStatisticsItem, CategoricalStatisticsItem, BoolStatisticsItem] class StatisticsPerColumnItem(TypedDict): column_name: str column_type: ColumnType column_statistics: SupportedStatistics def generate_bins(min_value: Union[int, float], max_value: Union[int, float], column_type: ColumnType, n_bins: int, column_name: Optional[str]=None) -> list[Union[int, float]]: if column_type is ColumnType.FLOAT: if min_value == max_value: bin_edges = [min_value] else: bin_size = (max_value - min_value) / n_bins bin_edges = np.arange(min_value, max_value, bin_size).astype(float).tolist() if len(bin_edges) != n_bins: raise StatisticsComputationError(f'Incorrect number of bins generated for column_name={column_name!r}, expected {n_bins}, got {len(bin_edges)}.') elif column_type is ColumnType.INT: bin_size = np.ceil((max_value - min_value + 1) / n_bins) bin_edges = np.arange(min_value, max_value + 1, bin_size).astype(int).tolist() if len(bin_edges) > n_bins: raise StatisticsComputationError(f'Incorrect number of bins generated for column_name={column_name!r}, expected {n_bins}, got {len(bin_edges)}.') else: raise ValueError(f'Incorrect column type of column_name={column_name!r}: {column_type}. 
') return bin_edges + [max_value] def compute_histogram(df: pl.dataframe.frame.DataFrame, column_name: str, column_type: ColumnType, min_value: Union[int, float], max_value: Union[int, float], n_bins: int, n_samples: int) -> Histogram: logging.debug(f'Compute histogram for column_name={column_name!r}') bin_edges = generate_bins(min_value=min_value, max_value=max_value, column_name=column_name, column_type=column_type, n_bins=n_bins) if len(bin_edges) == 2: if bin_edges[0] != bin_edges[1]: raise StatisticsComputationError(f'Got unexpected result during histogram computation for column_name={column_name!r}, column_type={column_type!r}: len(bin_edges={bin_edges!r}) is 2 but bin_edges[0]={bin_edges[0]!r} != bin_edges[1]={bin_edges[1]!r}. ') hist = [int(df[column_name].is_not_null().sum())] elif len(bin_edges) > 2: bins_edges_reverted = [-1 * b for b in bin_edges[::-1]] hist_df_reverted = df.with_columns(pl.col(column_name).mul(-1).alias('reverse'))['reverse'].hist(bins=bins_edges_reverted) hist_reverted = hist_df_reverted['count'].cast(int).to_list() hist = hist_reverted[::-1] hist = [hist[0] + hist[1]] + hist[2:-2] + [hist[-2] + hist[-1]] else: raise StatisticsComputationError(f'Got unexpected result during histogram computation for column_name={column_name!r}, column_type={column_type!r}: unexpected bin_edges={bin_edges!r}') logging.debug(f'hist={hist!r} bin_edges={bin_edges!r}') if len(hist) != len(bin_edges) - 1: raise StatisticsComputationError(f"Got unexpected result during histogram computation for column_name={column_name!r}, column_type={column_type!r}: number of bins in hist counts and bin edges don't match hist={hist!r}, bin_edges={bin_edges!r}") if sum(hist) != n_samples: raise StatisticsComputationError(f"Got unexpected result during histogram computation for column_name={column_name!r}, column_type={column_type!r}: hist counts sum and number of non-null samples don't match, histogram sum={sum(hist)}, n_samples={n_samples!r}") return Histogram(hist=hist, bin_edges=np.round(bin_edges, DECIMALS).tolist() if column_type is column_type.FLOAT else bin_edges) def min_max_mean_median_std(data: pl.DataFrame, column_name: str) -> tuple[float, float, float, float, float]: col_stats = dict(min=pl.all().min(), max=pl.all().max(), mean=pl.all().mean(), median=pl.all().median(), std=pl.all().std()) stats_names = pl.Series(col_stats.keys()) stats_expressions = [pl.struct(stat) for stat in col_stats.values()] stats = data.select(pl.col(column_name)).select(name=stats_names, stats=pl.concat_list(stats_expressions).flatten()).unnest('stats') (minimum, maximum, mean, median, std) = stats[column_name].to_list() if any((statistic is None for statistic in [minimum, maximum, mean, median, std])): if not all((statistic is None for statistic in [minimum, maximum, mean, median, std])): raise StatisticsComputationError(f'Unexpected result for column_name={column_name!r}: Some measures among minimum={minimum!r}, maximum={maximum!r}, mean={mean!r}, median={median!r}, std={std!r} are None but not all of them. 
') return (minimum, maximum, mean, median, std) (minimum, maximum, mean, median, std) = np.round([minimum, maximum, mean, median, std], DECIMALS).tolist() return (minimum, maximum, mean, median, std) def value_counts(data: pl.DataFrame, column_name: str) -> dict[Any, Any]: return dict(data[column_name].value_counts().rows()) def nan_count_proportion(data: pl.DataFrame, column_name: str, n_samples: int) -> tuple[int, float]: nan_count = data[column_name].null_count() nan_proportion = np.round(nan_count / n_samples, DECIMALS).item() if nan_count != 0 else 0.0 return (nan_count, nan_proportion) def all_nan_statistics_item(n_samples: int) -> NumericalStatisticsItem: return NumericalStatisticsItem(nan_count=n_samples, nan_proportion=1.0, min=None, max=None, mean=None, median=None, std=None, histogram=None) class Column: transform_column: Optional[type['Column']] = None def __init__(self, feature_name: str, n_samples: int): self.name = feature_name self.n_samples = n_samples @classmethod def compute_transformed_data(cls, *args: Any, **kwargs: Any) -> Any: raise NotImplementedError @classmethod def _compute_statistics(cls, *args: Any, **kwargs: Any) -> SupportedStatistics: raise NotImplementedError @classmethod def compute_statistics(cls, *args: Any, column_name: str, **kwargs: Any) -> Any: try: logging.info(f'Compute statistics for {cls.__name__} {column_name}. ') return cls._compute_statistics(*args, column_name=column_name, **kwargs) except Exception as error: raise StatisticsComputationError(f'Error for {cls.__name__}={column_name}: error={error!r}', error) def compute_and_prepare_response(self, *args: Any, **kwargs: Any) -> StatisticsPerColumnItem: raise NotImplementedError class ClassLabelColumn(Column): def __init__(self, *args: Any, feature_dict: dict[str, Any], **kwargs: Any): super().__init__(*args, **kwargs) self.feature_dict = feature_dict @classmethod def _compute_statistics(cls, data: pl.DataFrame, column_name: str, n_samples: int, feature_dict: dict[str, Any]) -> CategoricalStatisticsItem: datasets_feature = Features.from_dict({column_name: feature_dict})[column_name] (nan_count, nan_proportion) = nan_count_proportion(data, column_name, n_samples) ids2counts: dict[int, int] = value_counts(data, column_name) no_label_count = ids2counts.pop(NO_LABEL_VALUE, 0) no_label_proportion = np.round(no_label_count / n_samples, DECIMALS).item() if no_label_count != 0 else 0.0 num_classes = len(datasets_feature.names) labels2counts: dict[str, int] = {datasets_feature.int2str(cat_id): ids2counts.get(cat_id, 0) for cat_id in range(num_classes)} n_unique = data[column_name].n_unique() logging.debug(f'nan_count={nan_count!r} nan_proportion={nan_proportion!r} no_label_count={no_label_count!r} no_label_proportion={no_label_proportion!r}, n_unique={n_unique!r} labels2counts={labels2counts!r}') if n_unique > num_classes + int(no_label_count > 0) + int(nan_count > 0): raise StatisticsComputationError(f'Got unexpected result for ClassLabel column_name={column_name!r}: number of unique values is greater than provided by feature metadata. n_unique={n_unique!r}, datasets_feature={datasets_feature!r}, no_label_count={no_label_count!r}, nan_count={nan_count!r}. 
') return CategoricalStatisticsItem(nan_count=nan_count, nan_proportion=nan_proportion, no_label_count=no_label_count, no_label_proportion=no_label_proportion, n_unique=num_classes, frequencies=labels2counts) def compute_and_prepare_response(self, data: pl.DataFrame) -> StatisticsPerColumnItem: stats = self.compute_statistics(data, column_name=self.name, n_samples=self.n_samples, feature_dict=self.feature_dict) return StatisticsPerColumnItem(column_name=self.name, column_type=ColumnType.CLASS_LABEL, column_statistics=stats) class FloatColumn(Column): @classmethod def _compute_statistics(cls, data: pl.DataFrame, column_name: str, n_samples: int) -> NumericalStatisticsItem: data = data.fill_nan(None) (nan_count, nan_proportion) = nan_count_proportion(data, column_name, n_samples) if nan_count == n_samples: return all_nan_statistics_item(n_samples) (minimum, maximum, mean, median, std) = min_max_mean_median_std(data, column_name) logging.debug(f'minimum={minimum!r}, maximum={maximum!r}, mean={mean!r}, median={median!r}, std={std!r}, nan_count={nan_count!r} nan_proportion={nan_proportion!r}') hist = compute_histogram(data, column_name=column_name, column_type=ColumnType.FLOAT, min_value=minimum, max_value=maximum, n_bins=NUM_BINS, n_samples=n_samples - nan_count) return NumericalStatisticsItem(nan_count=nan_count, nan_proportion=nan_proportion, min=minimum, max=maximum, mean=mean, median=median, std=std, histogram=hist) def compute_and_prepare_response(self, data: pl.DataFrame) -> StatisticsPerColumnItem: stats = self.compute_statistics(data, column_name=self.name, n_samples=self.n_samples) return StatisticsPerColumnItem(column_name=self.name, column_type=ColumnType.FLOAT, column_statistics=stats) class IntColumn(Column): @classmethod def _compute_statistics(cls, data: pl.DataFrame, column_name: str, n_samples: int) -> NumericalStatisticsItem: (nan_count, nan_proportion) = nan_count_proportion(data, column_name, n_samples=n_samples) if nan_count == n_samples: return all_nan_statistics_item(n_samples) (minimum, maximum, mean, median, std) = min_max_mean_median_std(data, column_name) logging.debug(f'minimum={minimum!r}, maximum={maximum!r}, mean={mean!r}, median={median!r}, std={std!r}, nan_count={nan_count!r} nan_proportion={nan_proportion!r}') (minimum, maximum) = (int(minimum), int(maximum)) hist = compute_histogram(data, column_name=column_name, column_type=ColumnType.INT, min_value=minimum, max_value=maximum, n_bins=NUM_BINS, n_samples=n_samples - nan_count) return NumericalStatisticsItem(nan_count=nan_count, nan_proportion=nan_proportion, min=minimum, max=maximum, mean=mean, median=median, std=std, histogram=hist) def compute_and_prepare_response(self, data: pl.DataFrame) -> StatisticsPerColumnItem: stats = self.compute_statistics(data, column_name=self.name, n_samples=self.n_samples) return StatisticsPerColumnItem(column_name=self.name, column_type=ColumnType.INT, column_statistics=stats) class StringColumn(Column): transform_column = IntColumn @staticmethod def is_class(n_unique: int, n_samples: int) -> bool: return n_unique / n_samples <= MAX_PROPORTION_STRING_LABELS and n_unique <= MAX_NUM_STRING_LABELS or n_unique <= NUM_BINS @classmethod def compute_transformed_data(cls, data: pl.DataFrame, column_name: str, transformed_column_name: str) -> pl.DataFrame: return data.select(pl.col(column_name)).with_columns(pl.col(column_name).str.len_chars().alias(transformed_column_name)) @classmethod def _compute_statistics(cls, data: pl.DataFrame, column_name: str, n_samples: int) -> 
Union[CategoricalStatisticsItem, NumericalStatisticsItem]: (nan_count, nan_proportion) = nan_count_proportion(data, column_name, n_samples) n_unique = data[column_name].n_unique() if cls.is_class(n_unique, n_samples): labels2counts: dict[str, int] = value_counts(data, column_name) if nan_count != n_samples else {} logging.debug(f'n_unique={n_unique!r} nan_count={nan_count!r} nan_proportion={nan_proportion!r} labels2counts={labels2counts!r}') labels2counts.pop(None, None) return CategoricalStatisticsItem(nan_count=nan_count, nan_proportion=nan_proportion, no_label_count=0, no_label_proportion=0.0, n_unique=len(labels2counts), frequencies=labels2counts) lengths_column_name = f'{column_name}_len' lengths_df = cls.compute_transformed_data(data, column_name, transformed_column_name=lengths_column_name) lengths_stats: NumericalStatisticsItem = cls.transform_column.compute_statistics(lengths_df, column_name=lengths_column_name, n_samples=n_samples) return lengths_stats def compute_and_prepare_response(self, data: pl.DataFrame) -> StatisticsPerColumnItem: stats = self.compute_statistics(data, column_name=self.name, n_samples=self.n_samples) string_type = ColumnType.STRING_LABEL if 'frequencies' in stats else ColumnType.STRING_TEXT return StatisticsPerColumnItem(column_name=self.name, column_type=string_type, column_statistics=stats) class BoolColumn(Column): @classmethod def _compute_statistics(cls, data: pl.DataFrame, column_name: str, n_samples: int) -> BoolStatisticsItem: (nan_count, nan_proportion) = nan_count_proportion(data, column_name, n_samples) values2counts: dict[str, int] = value_counts(data, column_name) values2counts.pop(None, None) logging.debug(f'nan_count={nan_count!r} nan_proportion={nan_proportion!r} values2counts={values2counts!r}') return BoolStatisticsItem(nan_count=nan_count, nan_proportion=nan_proportion, frequencies={str(key): freq for (key, freq) in sorted(values2counts.items())}) def compute_and_prepare_response(self, data: pl.DataFrame) -> StatisticsPerColumnItem: stats = self.compute_statistics(data, column_name=self.name, n_samples=self.n_samples) return StatisticsPerColumnItem(column_name=self.name, column_type=ColumnType.BOOL, column_statistics=stats) class ListColumn(Column): transform_column = IntColumn @classmethod def compute_transformed_data(cls, data: pl.DataFrame, column_name: str, transformed_column_name: str) -> pl.DataFrame: return data.select(pl.col(column_name), pl.when(pl.col(column_name).is_not_null()).then(pl.col(column_name).list.len()).otherwise(pl.lit(None)).alias(transformed_column_name)) @classmethod def _compute_statistics(cls, data: pl.DataFrame, column_name: str, n_samples: int) -> NumericalStatisticsItem: (nan_count, nan_proportion) = nan_count_proportion(data, column_name, n_samples) if nan_count == n_samples: return all_nan_statistics_item(n_samples) lengths_column_name = f'{column_name}_len' lengths_df = cls.compute_transformed_data(data, column_name, lengths_column_name) lengths_stats: NumericalStatisticsItem = cls.transform_column.compute_statistics(lengths_df, column_name=lengths_column_name, n_samples=n_samples) return NumericalStatisticsItem(nan_count=nan_count, nan_proportion=nan_proportion, min=lengths_stats['min'], max=lengths_stats['max'], mean=lengths_stats['mean'], median=lengths_stats['median'], std=lengths_stats['std'], histogram=lengths_stats['histogram']) def compute_and_prepare_response(self, data: pl.DataFrame) -> StatisticsPerColumnItem: stats = self.compute_statistics(data, column_name=self.name, 
n_samples=self.n_samples) return StatisticsPerColumnItem(column_name=self.name, column_type=ColumnType.LIST, column_statistics=stats) class MediaColumn(Column): transform_column: type[Column] @classmethod def transform(cls, example: Optional[Union[bytes, dict[str, Any]]]) -> Any: raise NotImplementedError @classmethod def compute_transformed_data(cls, parquet_directory: Path, column_name: str, transform_func: Callable[[Any], Any]) -> list[Any]: parquet_files = list(parquet_directory.glob('*.parquet')) transformed_values = [] for filename in parquet_files: shard_items = pq.read_table(filename, columns=[column_name]).to_pydict()[column_name] shard_transformed_values = thread_map(transform_func, shard_items, desc=f'Transforming values of {cls.__name__} {column_name} for {filename.name}', leave=False) transformed_values.extend(shard_transformed_values) return transformed_values @classmethod def _compute_statistics(cls, parquet_directory: Path, column_name: str, n_samples: int) -> SupportedStatistics: transformed_values = cls.compute_transformed_data(parquet_directory, column_name, cls.transform) nan_count = sum((value is None for value in transformed_values)) if nan_count == n_samples: return all_nan_statistics_item(n_samples) nan_proportion = np.round(nan_count / n_samples, DECIMALS).item() if nan_count != 0 else 0.0 transformed_df = pl.from_dict({column_name: transformed_values}) transformed_stats: NumericalStatisticsItem = cls.transform_column.compute_statistics(data=transformed_df, column_name=column_name, n_samples=n_samples) return NumericalStatisticsItem(nan_count=nan_count, nan_proportion=nan_proportion, min=transformed_stats['min'], max=transformed_stats['max'], mean=transformed_stats['mean'], median=transformed_stats['median'], std=transformed_stats['std'], histogram=transformed_stats['histogram']) @classmethod def get_column_type(cls) -> ColumnType: return ColumnType(cls.__name__.split('Column')[0].lower()) def compute_and_prepare_response(self, parquet_directory: Path) -> StatisticsPerColumnItem: stats = self.compute_statistics(parquet_directory=parquet_directory, column_name=self.name, n_samples=self.n_samples) return StatisticsPerColumnItem(column_name=self.name, column_type=self.get_column_type(), column_statistics=stats) class AudioColumn(MediaColumn): transform_column = FloatColumn @staticmethod def get_duration(example: Optional[Union[bytes, dict[str, Any]]]) -> Optional[float]: if example is None: return None example_bytes = example['bytes'] if isinstance(example, dict) else example with io.BytesIO(example_bytes) as f: return librosa.get_duration(path=f) @classmethod def transform(cls, example: Optional[Union[bytes, dict[str, Any]]]) -> Optional[float]: return cls.get_duration(example) class ImageColumn(MediaColumn): transform_column = IntColumn @staticmethod def get_width(example: Optional[Union[bytes, dict[str, Any]]]) -> Optional[int]: image_shape = ImageColumn.get_shape(example) return image_shape[0] @staticmethod def get_shape(example: Optional[Union[bytes, dict[str, Any]]]) -> Union[tuple[None, None], tuple[int, int]]: if example is None: return (None, None) example_bytes = example['bytes'] if isinstance(example, dict) else example with io.BytesIO(example_bytes) as f: image = Image.open(f) return image.size @classmethod def transform(cls, example: Optional[Union[bytes, dict[str, Any]]]) -> Optional[int]: return cls.get_width(example) # File: dataset-viewer-main/services/worker/src/worker/utils.py import itertools import logging import os import warnings from 
collections.abc import Iterable from dataclasses import dataclass, field from itertools import count, islice from typing import Literal, Optional, TypeVar, Union, overload from urllib.parse import quote import PIL import requests from datasets import Dataset, DatasetInfo, DownloadConfig, Features, IterableDataset, load_dataset from datasets.utils.file_utils import SINGLE_FILE_COMPRESSION_EXTENSION_TO_PROTOCOL from huggingface_hub import HfFileSystem, HfFileSystemFile from huggingface_hub.hf_api import HfApi from huggingface_hub.utils._errors import RepositoryNotFoundError from libcommon.constants import CONFIG_SPLIT_NAMES_KIND, MAX_COLUMN_NAME_LENGTH from libcommon.dtos import RowsContent from libcommon.exceptions import ConfigNotFoundError, DatasetNotFoundError, DatasetWithScriptNotSupportedError, NormalRowsError, PreviousStepFormatError, SplitNotFoundError, StreamingRowsError, TooLongColumnNameError from libcommon.simple_cache import get_previous_step_or_raise from libcommon.utils import retry from pyarrow import ArrowInvalid MAX_IMAGE_PIXELS = 10000000000 @retry(on=[ConnectionError]) def get_rows(dataset: str, config: str, split: str, streaming: bool, rows_max_number: int, token: Union[bool, str, None]=False, column_names: Optional[list[str]]=None) -> RowsContent: download_config = DownloadConfig(delete_extracted=True) PIL.Image.MAX_IMAGE_PIXELS = MAX_IMAGE_PIXELS ds = load_dataset(dataset, name=config, split=split, streaming=streaming, token=token, download_config=download_config) if streaming: if not isinstance(ds, IterableDataset): raise TypeError('load_dataset should return an IterableDataset in streaming mode') elif not isinstance(ds, Dataset): raise TypeError('load_dataset should return a Dataset in normal mode') if column_names: ds = ds.select_columns(column_names) rows_plus_one = list(itertools.islice(ds, rows_max_number + 1)) rows = rows_plus_one[:rows_max_number] all_fetched = len(rows_plus_one) <= rows_max_number if all_fetched: logging.debug(f'all the rows in the split have been fetched ({len(rows_plus_one)})') else: logging.debug(f'the rows in the split have been truncated ({rows_max_number} rows)') return RowsContent(rows=rows, all_fetched=all_fetched, truncated_columns=[]) def get_rows_or_raise(dataset: str, config: str, split: str, rows_max_number: int, token: Union[bool, str, None], info: DatasetInfo, max_size_fallback: Optional[int]=None, column_names: Optional[list[str]]=[]) -> RowsContent: try: return get_rows(dataset=dataset, config=config, split=split, streaming=True, rows_max_number=rows_max_number, token=token, column_names=column_names) except Exception as err: if isinstance(err, ValueError) and 'trust_remote_code' in str(err): raise DatasetWithScriptNotSupportedError from err MAX_SIZE_FALLBACK = 100000000 if max_size_fallback: warnings.warn(f"The parameter 'max_size_fallback' is deprecated. 
The hard-coded value `{MAX_SIZE_FALLBACK}` will be used instead.", category=DeprecationWarning) if info.size_in_bytes is None or info.size_in_bytes > MAX_SIZE_FALLBACK: raise StreamingRowsError('Cannot load the dataset split (in streaming mode) to extract the first rows.', cause=err) from err try: return get_rows(dataset=dataset, config=config, split=split, streaming=False, rows_max_number=rows_max_number, token=token) except Exception as err: if isinstance(err, ValueError) and 'trust_remote_code' in str(err): raise DatasetWithScriptNotSupportedError from err raise NormalRowsError('Cannot load the dataset split (in normal download mode) to extract the first rows.', cause=err) from err def hf_hub_url(repo_id: str, filename: str, hf_endpoint: str, revision: str, url_template: str) -> str: return (hf_endpoint + url_template) % (repo_id, quote(revision, safe=''), filename) def hffs_parquet_url(repo_id: str, config: str, split_directory: str, filename: str) -> str: return f'hf://datasets/{repo_id}/{config}/{split_directory}/{filename}' def hf_hub_open_file(file_url: str, hf_endpoint: str, hf_token: Optional[str], revision: Optional[str]=None) -> HfFileSystemFile: fs = HfFileSystem(endpoint=hf_endpoint, token=hf_token) return fs.open(file_url, revision=revision) @retry(on=[ArrowInvalid], sleeps=[0.2, 1, 1, 10, 10, 10]) def retry_on_arrow_invalid_open_file(file_url: str, hf_endpoint: str, hf_token: Optional[str], revision: Optional[str]=None) -> HfFileSystemFile: return hf_hub_open_file(file_url=file_url, hf_endpoint=hf_endpoint, hf_token=hf_token, revision=revision) DATASET_TYPE = 'dataset' LIST_REPO_REFS_RETRY_SLEEPS = [1, 1, 1, 10, 10] LOCK_GIT_BRANCH_RETRY_SLEEPS = [1, 1, 1, 1, 1, 10, 10, 10, 10, 100] * 3 def create_branch(dataset: str, target_revision: str, hf_api: HfApi, committer_hf_api: HfApi) -> None: try: refs = retry(on=[requests.exceptions.ConnectionError], sleeps=LIST_REPO_REFS_RETRY_SLEEPS)(hf_api.list_repo_refs)(repo_id=dataset, repo_type=DATASET_TYPE) if all((ref.ref != target_revision for ref in refs.converts)): initial_commit = hf_api.list_repo_commits(repo_id=dataset, repo_type=DATASET_TYPE)[-1].commit_id committer_hf_api.create_branch(repo_id=dataset, branch=target_revision, repo_type=DATASET_TYPE, revision=initial_commit, exist_ok=True) except RepositoryNotFoundError as err: raise DatasetNotFoundError('The dataset does not exist on the Hub (was deleted during job).') from err def check_config_exists(dataset: str, config: str) -> None: config_names_response = get_previous_step_or_raise(kind='dataset-config-names', dataset=dataset) try: configs_content = config_names_response['content']['config_names'] except Exception as e: raise PreviousStepFormatError("Previous steps 'dataset-config-names' did not return the expected content.", e) from e if config not in [config_item['config'] for config_item in configs_content]: raise ConfigNotFoundError(f"Config '{config}' does not exist for dataset '{dataset}'") def check_split_exists(dataset: str, config: str, split: str) -> None: check_config_exists(dataset, config) split_names_response = get_previous_step_or_raise(kind='config-split-names', dataset=dataset, config=config) try: splits_content = split_names_response['content']['splits'] except Exception as e: raise PreviousStepFormatError("Previous step 'config-split-names' did not return the expected content.", e) from e if split not in [split_item['split'] for split_item in splits_content]: raise SplitNotFoundError(f"Split '{split}' does not exist for the config '{config}' of the 
dataset.") def get_split_names(dataset: str, config: str) -> set[str]: split_names_response = get_previous_step_or_raise(kind=CONFIG_SPLIT_NAMES_KIND, dataset=dataset, config=config) split_names_content = split_names_response['content'] if 'splits' not in split_names_content: raise PreviousStepFormatError("Previous step did not return the expected content: 'splits'.") if not isinstance(split_names_content['splits'], list): raise PreviousStepFormatError('Previous step did not return the expected content.', TypeError(f"'splits' should be a list, but got {type(split_names_content['splits'])}")) return {split_name_item['split'] for split_name_item in split_names_content['splits']} def raise_if_long_column_name(features: Optional[Features]) -> None: if features is None: return for feature_name in features: if len(feature_name) > MAX_COLUMN_NAME_LENGTH: short_name = feature_name[:MAX_COLUMN_NAME_LENGTH - 3] + '...' raise TooLongColumnNameError(f"Column name '{short_name}' is too long. It should be less than {MAX_COLUMN_NAME_LENGTH} characters.") T = TypeVar('T') @overload def batched(it: Iterable[T], n: int) -> Iterable[list[T]]: ... @overload def batched(it: Iterable[T], n: int, with_indices: Literal[False]) -> Iterable[list[T]]: ... @overload def batched(it: Iterable[T], n: int, with_indices: Literal[True]) -> Iterable[tuple[list[int], list[T]]]: ... def batched(it: Iterable[T], n: int, with_indices: bool=False) -> Union[Iterable[list[T]], Iterable[tuple[list[int], list[T]]]]: (it, indices) = (iter(it), count()) while (batch := list(islice(it, n))): yield ((list(islice(indices, len(batch))), batch) if with_indices else batch) FileExtensionTuple = tuple[str, Optional[str]] @dataclass class FileExtension: extension: str uncompressed_extension: Optional[str] = field(default=None) def get_tuples(self) -> list[FileExtensionTuple]: if self.uncompressed_extension: return [(self.extension, None), (self.uncompressed_extension, self.extension)] return [(self.extension, None)] def get_file_extension(filename: str, recursive: bool=True, clean: bool=True) -> FileExtension: [base, extension] = os.path.splitext(filename) extension = extension.lower() if clean: for symb in '?-_': extension = extension.split(symb)[0] if recursive and extension.lstrip('.') in SINGLE_FILE_COMPRESSION_EXTENSION_TO_PROTOCOL: uncompressed_extension = get_file_extension(base, recursive=False, clean=False) return FileExtension(extension=extension, uncompressed_extension=uncompressed_extension.extension) return FileExtension(extension=extension)
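# Editor's note: a minimal usage sketch (not part of the repository), added to illustrate a few
# of the pure helpers defined above. It assumes it is run inside the services/worker environment,
# where `worker.utils` and `worker.statistics_utils` (and their dependencies such as libcommon,
# datasets, polars and numpy) are importable; the filenames and values below are arbitrary examples.
from worker.statistics_utils import ColumnType, generate_bins
from worker.utils import batched, get_file_extension

# Integer bins: bin_size = ceil((max - min + 1) / n_bins), and the maximum value is appended
# as the final edge, so the last bin covers the remainder.
print(generate_bins(min_value=0, max_value=99, column_type=ColumnType.INT, n_bins=10))
# -> [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 99]

# Lazy batching of an iterable, optionally paired with the indices of each batch.
print(list(batched('abcde', 2)))                     # [['a', 'b'], ['c', 'd'], ['e']]
print(list(batched('abcde', 2, with_indices=True)))  # [([0, 1], ['a', 'b']), ([2, 3], ['c', 'd']), ([4], ['e'])]

# File extensions, with the inner extension recovered for single-file compression formats
# (provided the outer extension appears in datasets' SINGLE_FILE_COMPRESSION_EXTENSION_TO_PROTOCOL).
print(get_file_extension('train-00000-of-00001.parquet').get_tuples())  # [('.parquet', None)]
print(get_file_extension('data.csv.gz').get_tuples())                   # [('.gz', None), ('.csv', '.gz')]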