diff --git a/.gitattributes b/.gitattributes index d0e9e5edea25b674344768c7a056a4dfc800899e..38e4676a562dddda55b0919ed907c491671c1dbf 100644 --- a/.gitattributes +++ b/.gitattributes @@ -14,11 +14,10 @@ *.ot filter=lfs diff=lfs merge=lfs -text *.parquet filter=lfs diff=lfs merge=lfs -text *.pb filter=lfs diff=lfs merge=lfs -text -*.pkl filter=lfs diff=lfs merge=lfs -text *.pt filter=lfs diff=lfs merge=lfs -text *.pth filter=lfs diff=lfs merge=lfs -text *.rar filter=lfs diff=lfs merge=lfs -text -saved_model/**/* filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.tar.* filter=lfs diff=lfs merge=lfs -text *.tflite filter=lfs diff=lfs merge=lfs -text *.tgz filter=lfs diff=lfs merge=lfs -text @@ -26,4 +25,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zstandard filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +geneformer/gene_name_id_dict.pkl filter=lfs diff=lfs merge=lfs -text model.safetensors filter=lfs diff=lfs merge=lfs -text diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 329c392ef93028194018644abc5e66b0afdfdf11..0000000000000000000000000000000000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# See https://pre-commit.com for more information -# See https://pre-commit.com/hooks.html for more hooks -repos: -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.2.0 - hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-yaml - - id: check-added-large-files - - id: check-merge-conflict - - id: mixed-line-ending - - id: check-docstring-first -- repo: https://github.com/pycqa/isort - rev: 5.12.0 - hooks: - - id: isort - args: ["--profile", "black"] -- repo: https://github.com/astral-sh/ruff-pre-commit - # Ruff version. - rev: v0.1.4 - hooks: - # Run the Ruff linter. - - id: ruff - # Run the Ruff formatter. 
- - id: ruff-format diff --git a/.readthedocs.yaml b/.readthedocs.yaml deleted file mode 100644 index 8c50993305dc7ea3d1c8b2e6271afa1665762f78..0000000000000000000000000000000000000000 --- a/.readthedocs.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Read the Docs configuration file - -# Required -version: 2 - -# Set the OS, Python version and other tools you might need -build: - os: ubuntu-22.04 - tools: - python: "3.10" - -# Build documentation in the "docs/" directory with Sphinx -sphinx: - configuration: docs/source/conf.py - -# Python requirements required build your documentation -python: - install: - - requirements: docs/requirements.txt diff --git a/MANIFEST.in b/MANIFEST.in index c3875d90a1e1ee1715279ba71ae3efc1a46643e8..7899a8fa49ff82e5a26f56212587d43308eddeb4 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,3 @@ -include geneformer/gene_median_dictionary_gc95M.pkl -include geneformer/gene_name_id_dict_gc95M.pkl -include geneformer/ensembl_mapping_dict_gc95M.pkl -include geneformer/token_dictionary_gc95M.pkl +include geneformer/gene_median_dictionary.pkl +include geneformer/token_dictionary.pkl +include geneformer/gene_name_id_dict.pkl diff --git a/README.md b/README.md index 2d1ad4375703f99e682e4293131484adeb939522..eda6505686262160fceb7997c86f0f2a41ce3969 100644 --- a/README.md +++ b/README.md @@ -1,43 +1,22 @@ --- datasets: ctheodoris/Genecorpus-30M license: apache-2.0 -tags: -- single-cell -- genomics --- # Geneformer -Geneformer is a foundational transformer model pretrained on a large-scale corpus of single cell transcriptomes to enable context-aware predictions in settings with limited data in network biology. +Geneformer is a foundation transformer model pretrained on a large-scale corpus of ~30 million single cell transcriptomes to enable context-aware predictions in settings with limited data in network biology. 
-- See [our manuscript](https://rdcu.be/ddrx0) for details of the original model trained on ~30 million transcriptomes in June 2021 and the initial report of our in silico perturbation and cell and gene classification strategies. -- See [our manuscript](https://www.biorxiv.org/content/10.1101/2024.08.16.608180v1.full.pdf) for details of the expanded model trained on ~95 million transcriptomes in April 2024 and our continual learning, multitask learning, and quantization strategies. -- See [geneformer.readthedocs.io](https://geneformer.readthedocs.io) for documentation. +See [our manuscript](https://rdcu.be/ddrx0) for details. # Model Description -Geneformer is a foundational transformer model pretrained on a large-scale corpus of single cell transcriptomes representing a broad range of human tissues. Geneformer was originally pretrained in June 2021 on [Genecorpus-30M](https://huggingface.co/datasets/ctheodoris/Genecorpus-30M), a corpus comprised of ~30 million single cell transcriptomes. We excluded cells with high mutational burdens (e.g. malignant cells and immortalized cell lines) that could lead to substantial network rewiring without companion genome sequencing to facilitate interpretation. Then, in April 2024, Geneformer was pretrained on ~95 million non-cancer transcriptomes, followed by continual learning on ~14 million cancer transcriptomes to yield a cancer domain-tuned model. +Geneformer is a foundation transformer model pretrained on [Genecorpus-30M](https://huggingface.co/datasets/ctheodoris/Genecorpus-30M), a pretraining corpus comprised of ~30 million single cell transcriptomes from a broad range of human tissues. We excluded cells with high mutational burdens (e.g. malignant cells and immortalized cell lines) that could lead to substantial network rewiring without companion genome sequencing to facilitate interpretation. 
Each single cell’s transcriptome is presented to the model as a rank value encoding where genes are ranked by their expression in that cell normalized by their expression across the entire Genecorpus-30M. The rank value encoding provides a nonparametric representation of that cell’s transcriptome and takes advantage of the many observations of each gene’s expression across Genecorpus-30M to prioritize genes that distinguish cell state. Specifically, this method will deprioritize ubiquitously highly-expressed housekeeping genes by normalizing them to a lower rank. Conversely, genes such as transcription factors that may be lowly expressed when they are expressed but highly distinguish cell state will move to a higher rank within the encoding. Furthermore, this rank-based approach may be more robust against technical artifacts that may systematically bias the absolute transcript counts value while the overall relative ranking of genes within each cell remains more stable. -Each single cell’s transcriptome is presented to the model as a rank value encoding where genes are ranked by their expression in that cell scaled by their expression across the entire Genecorpus-30M. The rank value encoding provides a nonparametric representation of that cell’s transcriptome and takes advantage of the many observations of each gene’s expression across the pretraining corpus to prioritize genes that distinguish cell state. Specifically, this method will deprioritize ubiquitously highly-expressed housekeeping genes by scaling them to a lower rank. Conversely, genes such as transcription factors that may be lowly expressed when they are expressed but highly distinguish cell state will move to a higher rank within the encoding. Furthermore, this rank-based approach may be more robust against technical artifacts that may systematically bias the absolute transcript counts value while the overall relative ranking of genes within each cell remains more stable. 
+The rank value encoding of each single cell’s transcriptome then proceeds through six transformer encoder units. Pretraining was accomplished using a masked learning objective where 15% of the genes within each transcriptome were masked and the model was trained to predict which gene should be within each masked position in that specific cell state using the context of the remaining unmasked genes. A major strength of this approach is that it is entirely self-supervised and can be accomplished on completely unlabeled data, which allows the inclusion of large amounts of training data without being restricted to samples with accompanying labels. -The rank value encoding of each single cell’s transcriptome then proceeds through N layers of transformer encoder units, where N varies dependent on the model size. Pretraining was accomplished using a masked learning objective where 15% of the genes within each transcriptome were masked and the model was trained to predict which gene should be within each masked position in that specific cell state using the context of the remaining unmasked genes. A major strength of this approach is that it is entirely self-supervised and can be accomplished on completely unlabeled data, which allows the inclusion of large amounts of training data without being restricted to samples with accompanying labels. +We detail applications and results in [our manuscript](https://rdcu.be/ddrx0). -We detail applications and results in [our manuscript](https://rdcu.be/ddrx0). +During pretraining, Geneformer gained a fundamental understanding of network dynamics, encoding network hierarchy in the model’s attention weights in a completely self-supervised manner. Fine-tuning Geneformer towards a diverse panel of downstream tasks relevant to chromatin and network dynamics using limited task-specific data demonstrated that Geneformer consistently boosted predictive accuracy. 
Applied to disease modeling with limited patient data, Geneformer identified candidate therapeutic targets. Overall, Geneformer represents a pretrained deep learning model from which fine-tuning towards a broad range of downstream applications can be pursued to accelerate discovery of key network regulators and candidate therapeutic targets. -During pretraining, Geneformer gained a fundamental understanding of network dynamics, encoding network hierarchy in the model’s attention weights in a completely self-supervised manner. With both zero-shot learning and fine-tuning with limited task-specific data, Geneformer consistently boosted predictive accuracy in a diverse panel of downstream tasks relevant to chromatin and network dynamics. In silico perturbation with zero-shot learning identified a novel transcription factor in cardiomyocytes that we experimentally validated to be critical to their ability to generate contractile force. In silico treatment with limited patient data revealed candidate therapeutic targets for cardiomyopathy that we experimentally validated to significantly improve the ability of cardiomyocytes to generate contractile force in an induced pluripotent stem cell (iPSC) model of the disease. Overall, Geneformer represents a foundational deep learning model pretrained on a large-scale corpus human single cell transcriptomes to gain a fundamental understanding of gene network dynamics that can now be democratized to a vast array of downstream tasks to accelerate discovery of key network regulators and candidate therapeutic targets. - -The repository includes the following pretrained models: - -L=layers\ -M=millions of cells used for pretraining\ -i=input size\ -(pretraining date) - -- GF-6L-30M-i2048 (June 2021) -- GF-12L-30M-i2048 (June 2021) -- GF-12L-95M-i4096 (April 2024) -- GF-20L-95M-i4096 (April 2024) - -The current default model in the main directory of the repository is GF-12L-95M-i4096. 
- -The repository also contains fined tuned models in the fine_tuned_models directory and the cancer-tuned model following continual learning on ~14 million cancer cells, GF-12L-95M-i4096_CLcancer. +In [our manuscript](https://rdcu.be/ddrx0), we report results for the 6 layer Geneformer model pretrained on Genecorpus-30M. We additionally provide within this repository a 12 layer Geneformer model, scaled up with retained width:depth aspect ratio, also pretrained on Genecorpus-30M. # Application The pretrained Geneformer model can be used directly for zero-shot learning, for example for in silico perturbation analysis, or by fine-tuning towards the relevant downstream task, such as gene or cell state classification. @@ -45,7 +24,7 @@ The pretrained Geneformer model can be used directly for zero-shot learning, for Example applications demonstrated in [our manuscript](https://rdcu.be/ddrx0) include: *Fine-tuning*: -- transcription factor dosage sensitivity +- transcription factor dosage sensitivity - chromatin dynamics (bivalently marked promoters) - transcription factor regulatory range - gene network centrality @@ -67,11 +46,9 @@ Example applications demonstrated in [our manuscript](https://rdcu.be/ddrx0) inc - in silico perturbation to determine transcription factor cooperativity # Installation -In addition to the pretrained model, contained herein are functions for tokenizing and collating data specific to single cell transcriptomics, pretraining the model, fine-tuning the model, extracting and plotting cell embeddings, and performing in silico pertrubation with either the pretrained or fine-tuned models. To install (~20s): +In addition to the pretrained model, contained herein are functions for tokenizing and collating data specific to single cell transcriptomics, pretraining the model, fine-tuning the model, extracting and plotting cell embeddings, and performing in silico pertrubation with either the pretrained or fine-tuned models. 
To install: ```bash -# Make sure you have git-lfs installed (https://git-lfs.com) -git lfs install git clone https://huggingface.co/ctheodoris/Geneformer cd Geneformer pip install . @@ -85,10 +62,6 @@ For usage, see [examples](https://huggingface.co/ctheodoris/Geneformer/tree/main - extracting and plotting cell embeddings - in silico perturbation -Please note that the fine-tuning examples are meant to be generally applicable and the input datasets and labels will vary dependent on the downstream task. Example input files for a few of the downstream tasks demonstrated in the manuscript are located within the [example_input_files directory](https://huggingface.co/datasets/ctheodoris/Genecorpus-30M/tree/main/example_input_files) in the dataset repository, but these only represent a few example fine-tuning applications. - -Please note that GPU resources are required for efficient usage of Geneformer. Additionally, we strongly recommend tuning hyperparameters for each downstream fine-tuning application as this can significantly boost predictive potential in the downstream task (e.g. max learning rate, learning schedule, number of layers to freeze, etc.). +Please note that the fine-tuning examples are meant to be generally applicable and the input datasets and labels will vary dependent on the downstream task. Example input files for a few of the downstream tasks demonstrated in the manuscript are located within the [example_input_files directory](https://huggingface.co/datasets/ctheodoris/Genecorpus-30M/tree/main/example_input_files) in the dataset repository, but these only represent a few example fine-tuning applications. -# Citations -- C V Theodoris#, L Xiao, A Chopra, M D Chaffin, Z R Al Sayed, M C Hill, H Mantineo, E Brydon, Z Zeng, X S Liu, P T Ellinor#. Transfer learning enables predictions in network biology. _**Nature**_, 31 May 2023. 
(#co-corresponding authors) -- H Chen*, M S Venkatesh*, J Gomez Ortega, S V Mahesh, T Nandi, R Madduri, K Pelka†, C V Theodoris†#. Quantized multi-task learning for context-specific representations of gene network dynamics. _**bioRxiv**_, 19 Aug 2024. (*co-first authors, †co-senior authors, #corresponding author) \ No newline at end of file +Please note that GPU resources are required for efficient usage of Geneformer. Additionally, we strongly recommend tuning hyperparameters for each downstream fine-tuning application as this can significantly boost predictive potential in the downstream task (e.g. max learning rate, learning schedule, number of layers to freeze, etc.). \ No newline at end of file diff --git a/config.json b/config.json index 86e20c35e6f257f0daeb00ebb92a0751d12d8fff..d131b7026d684013f988cc9e3dcae2e5a284bc0e 100644 --- a/config.json +++ b/config.json @@ -3,22 +3,21 @@ "BertForMaskedLM" ], "attention_probs_dropout_prob": 0.02, - "classifier_dropout": null, + "gradient_checkpointing": false, "hidden_act": "relu", "hidden_dropout_prob": 0.02, - "hidden_size": 512, + "hidden_size": 256, "initializer_range": 0.02, - "intermediate_size": 1024, + "intermediate_size": 512, "layer_norm_eps": 1e-12, - "max_position_embeddings": 4096, + "max_position_embeddings": 2048, "model_type": "bert", - "num_attention_heads": 8, - "num_hidden_layers": 12, + "num_attention_heads": 4, + "num_hidden_layers": 6, "pad_token_id": 0, "position_embedding_type": "absolute", - "torch_dtype": "float32", - "transformers_version": "4.37.1", + "transformers_version": "4.6.0", "type_vocab_size": 2, "use_cache": true, - "vocab_size": 20275 + "vocab_size": 25426 } diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index d0c3cbf1020d5c292abdedf27627c6abe25e2293..0000000000000000000000000000000000000000 --- a/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from 
the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = source -BUILDDIR = build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/make.bat b/docs/make.bat deleted file mode 100644 index 747ffb7b3033659bdd2d1e6eae41ecb00358a45e..0000000000000000000000000000000000000000 --- a/docs/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=source -set BUILDDIR=build - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.https://www.sphinx-doc.org/ - exit /b 1 -) - -if "%1" == "" goto help - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index d4b51ede80c4f16d12cac47ffe5d17e496a3addd..0000000000000000000000000000000000000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -. 
-sphinx_rtd_theme==2.0.0 -nbsphinx==0.9.3 diff --git a/docs/source/_static/css/custom.css b/docs/source/_static/css/custom.css deleted file mode 100644 index 1c6748950c328c423cad4a9a039f6477ea19cc4c..0000000000000000000000000000000000000000 --- a/docs/source/_static/css/custom.css +++ /dev/null @@ -1,40 +0,0 @@ -/* top left logo */ -.wy-side-nav-search, .wy-nav-top { - background: linear-gradient(15deg, #13547a 0%, #80d0c7 100%); -} - - -/* unvisited link */ -.wy-nav-content a:link { - color: #067abd; -} - -/* visited link */ -.wy-nav-content a:visited { - color: #4b827c; -} - -/* mouse over link */ -.wy-nav-content a:hover { - color: #80d0c7; -} - -/* selected link */ -.wy-nav-content a:active { - color: #4b827c; -} - -/* class object */ -.sig.sig-object { - padding: 5px 5px 5px 5px; - background-color: #ececec; - border-style: solid; - border-color: black; - border-width: 1px 0; -} - -/* parameter object */ -dt { - padding: 5px 5px 5px 5px; - background-color: #ececec; -} diff --git a/docs/source/_static/gf_logo.png b/docs/source/_static/gf_logo.png deleted file mode 100644 index 68fd0aac123094bdfd9bae1356e6c0012bded8a0..0000000000000000000000000000000000000000 Binary files a/docs/source/_static/gf_logo.png and /dev/null differ diff --git a/docs/source/about.rst b/docs/source/about.rst deleted file mode 100644 index 7e5a53453d0a3a4ed59f12b4191e17d3d82d4411..0000000000000000000000000000000000000000 --- a/docs/source/about.rst +++ /dev/null @@ -1,49 +0,0 @@ -About -===== - -Model Description ------------------ - -**Geneformer** is a context-aware, attention-based deep learning model pretrained on a large-scale corpus of single-cell transcriptomes to enable context-specific predictions in settings with limited data in network biology. During pretraining, Geneformer gained a fundamental understanding of network dynamics, encoding network hierarchy in the attention weights of the model in a completely self-supervised manner. 
With both zero-shot learning and fine-tuning with limited task-specific data, Geneformer consistently boosted predictive accuracy in a diverse panel of downstream tasks relevant to chromatin and network dynamics. In silico perturbation with zero-shot learning identified a novel transcription factor in cardiomyocytes that we experimentally validated to be critical to their ability to generate contractile force. In silico treatment with limited patient data revealed candidate therapeutic targets for cardiomyopathy that we experimentally validated to significantly improve the ability of cardiomyocytes to generate contractile force in an iPSC model of the disease. Overall, Geneformer represents a foundational deep learning model pretrained on a large-scale corpus of human single cell transcriptomes to gain a fundamental understanding of gene network dynamics that can now be democratized to a vast array of downstream tasks to accelerate discovery of key network regulators and candidate therapeutic targets. - -In `our manuscript `_, we report results for the original 6 layer Geneformer model pretrained on Genecorpus-30M. We additionally provide within the repository a 12 layer Geneformer model, scaled up with retained width:depth aspect ratio, also pretrained on Genecorpus-30M. - -Both the `6 `_ and `12 `_ layer Geneformer models were pretrained in June 2021. - -Also see `our 2024 manuscript `_, for details of the `expanded model `_ trained on ~95 million transcriptomes in April 2024 and our continual learning, multitask learning, and quantization strategies. - -Application ------------ - -The pretrained Geneformer model can be used directly for zero-shot learning, for example for in silico perturbation analysis, or by fine-tuning towards the relevant downstream task, such as gene or cell state classification. 
- -Example applications demonstrated in `our manuscript `_ include: - -| *Fine-tuning*: -| - transcription factor dosage sensitivity -| - chromatin dynamics (bivalently marked promoters) -| - transcription factor regulatory range -| - gene network centrality -| - transcription factor targets -| - cell type annotation -| - batch integration -| - cell state classification across differentiation -| - disease classification -| - in silico perturbation to determine disease-driving genes -| - in silico treatment to determine candidate therapeutic targets - -| *Zero-shot learning*: -| - batch integration -| - gene context specificity -| - in silico reprogramming -| - in silico differentiation -| - in silico perturbation to determine impact on cell state -| - in silico perturbation to determine transcription factor targets -| - in silico perturbation to determine transcription factor cooperativity - -Citations ---------- - -| C V Theodoris #, L Xiao, A Chopra, M D Chaffin, Z R Al Sayed, M C Hill, H Mantineo, E Brydon, Z Zeng, X S Liu, P T Ellinor #. `Transfer learning enables predictions in network biology. `_ *Nature*, 31 May 2023. (# co-corresponding authors) - -| H Chen \*, M S Venkatesh \*, J Gomez Ortega, S V Mahesh, T Nandi, R Madduri, K Pelka †, C V Theodoris † #. `Quantized multi-task learning for context-specific representations of gene network dynamics. `_ *bioRxiv*, 19 Aug 2024. (\* co-first authors, † co-senior authors, # corresponding author) diff --git a/docs/source/api.rst b/docs/source/api.rst deleted file mode 100644 index 36817a1c1ce42a95485eefaa6c2ad1dad0cb78db..0000000000000000000000000000000000000000 --- a/docs/source/api.rst +++ /dev/null @@ -1,51 +0,0 @@ -API -=== - -Tokenizer ---------- - -.. toctree:: - :maxdepth: 1 - - geneformer.tokenizer - -Classifier ----------- - -.. toctree:: - :maxdepth: 1 - - geneformer.classifier - -Multitask Classifier --------------------- - -.. 
toctree:: - :maxdepth: 1 - - geneformer.mtl_classifier - -Embedding Extractor -------------------- - -.. toctree:: - :maxdepth: 1 - - geneformer.emb_extractor - -In Silico Perturber -------------------- - -.. toctree:: - :maxdepth: 1 - - geneformer.in_silico_perturber - - -In Silico Perturber Stats -------------------------- - -.. toctree:: - :maxdepth: 1 - - geneformer.in_silico_perturber_stats diff --git a/docs/source/conf.py b/docs/source/conf.py deleted file mode 100644 index 37b658f688ddc54230e18687d43ae4618fdd9ddd..0000000000000000000000000000000000000000 --- a/docs/source/conf.py +++ /dev/null @@ -1,80 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# For the full list of built-in configuration values, see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -import pathlib -import re -import sys - -from sphinx.ext import autodoc - -sys.path.insert(0, pathlib.Path(__file__).parents[2].resolve().as_posix()) - - -# -- Project information ----------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information - -project = "geneformer" -copyright = "2024, Christina Theodoris" -author = "Christina Theodoris" -release = "0.1.0" -repository_url = "https://huggingface.co/ctheodoris/Geneformer" - -# -- General configuration --------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "nbsphinx", - "sphinx.ext.viewcode", - "sphinx.ext.doctest", -] - -templates_path = ["_templates"] -exclude_patterns = [ - "**.ipynb_checkpoints", -] -autoclass_content = "both" - - -class MockedClassDocumenter(autodoc.ClassDocumenter): - def add_line(self, line: str, source: str, *lineno: int) -> None: - if line == " Bases: :py:class:`object`": - return - super().add_line(line, source, *lineno) - - 
-autodoc.ClassDocumenter = MockedClassDocumenter -add_module_names = False - - -def process_signature(app, what, name, obj, options, signature, return_annotation): - # loop through each line in the docstring and replace path with - # the generic path text - signature = re.sub(r"PosixPath\(.*?\)", "FILEPATH", signature) - return (signature, None) - - -def setup(app): - app.connect("autodoc-process-signature", process_signature) - - -# -- Options for HTML output ------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output - -html_theme = "sphinx_rtd_theme" -html_show_sphinx = False -html_static_path = ["_static"] -html_logo = "_static/gf_logo.png" -html_theme_options = { - "collapse_navigation": False, - "sticky_navigation": True, - "navigation_depth": 3, - "logo_only": True, -} -html_css_files = [ - "css/custom.css", -] -html_show_sourcelink = False diff --git a/docs/source/geneformer.classifier.rst b/docs/source/geneformer.classifier.rst deleted file mode 100644 index cf3548519d6e5b8df963ede9918e944053b92493..0000000000000000000000000000000000000000 --- a/docs/source/geneformer.classifier.rst +++ /dev/null @@ -1,10 +0,0 @@ -geneformer.classifier -===================== - -.. automodule:: geneformer.classifier - :members: - :undoc-members: - :show-inheritance: - :exclude-members: - valid_option_dict, - validate_options diff --git a/docs/source/geneformer.emb_extractor.rst b/docs/source/geneformer.emb_extractor.rst deleted file mode 100644 index 0f602294b47f598dde04e16ab2fa0c51ecc43dac..0000000000000000000000000000000000000000 --- a/docs/source/geneformer.emb_extractor.rst +++ /dev/null @@ -1,26 +0,0 @@ -geneformer.emb\_extractor -========================= - -.. 
automodule:: geneformer.emb_extractor - :members: - :undoc-members: - :show-inheritance: - :exclude-members: - accumulate_tdigests, - gen_heatmap_class_colors, - gen_heatmap_class_dict, - get_embs, - label_cell_embs, - label_gene_embs, - make_colorbar, - plot_heatmap, - plot_umap, - summarize_gene_embs, - tdigest_mean, - tdigest_median, - test_emb, - update_tdigest_dict, - update_tdigest_dict_mean, - update_tdigest_dict_median, - valid_option_dict, - validate_options diff --git a/docs/source/geneformer.in_silico_perturber.rst b/docs/source/geneformer.in_silico_perturber.rst deleted file mode 100644 index fab76dea3c46244ab15d3d77552bc538535675e5..0000000000000000000000000000000000000000 --- a/docs/source/geneformer.in_silico_perturber.rst +++ /dev/null @@ -1,8 +0,0 @@ -geneformer.in\_silico\_perturber -======================================= - -.. automodule:: geneformer.in_silico_perturber - :members: - :undoc-members: - :show-inheritance: - :exclude-members: valid_option_dict, validate_options, apply_additional_filters, isp_perturb_all, isp_perturb_set, update_perturbation_dictionary diff --git a/docs/source/geneformer.in_silico_perturber_stats.rst b/docs/source/geneformer.in_silico_perturber_stats.rst deleted file mode 100644 index 97d8f170017ead706fd9160fb622c6debc3b3a1a..0000000000000000000000000000000000000000 --- a/docs/source/geneformer.in_silico_perturber_stats.rst +++ /dev/null @@ -1,25 +0,0 @@ -geneformer.in\_silico\_perturber\_stats -============================================== - -.. 
automodule:: geneformer.in_silico_perturber_stats - :members: - :undoc-members: - :show-inheritance: - :exclude-members: - find, - get_fdr, - get_gene_list, - get_impact_component, - invert_dict, - isp_aggregate_gene_shifts, - isp_aggregate_grouped_perturb, - isp_stats_mixture_model, - isp_stats_to_goal_state, - isp_stats_vs_null, - n_detections, - read_dict, - read_dictionaries, - token_to_gene_name, - token_tuple_to_ensembl_ids, - valid_option_dict, - validate_options diff --git a/docs/source/geneformer.mtl_classifier.rst b/docs/source/geneformer.mtl_classifier.rst deleted file mode 100644 index b67c1d30bc13926095c8d5d021e68f5146aff2e1..0000000000000000000000000000000000000000 --- a/docs/source/geneformer.mtl_classifier.rst +++ /dev/null @@ -1,11 +0,0 @@ -geneformer.mtl\_classifier -========================== - -.. automodule:: geneformer.mtl_classifier - :members: - :undoc-members: - :show-inheritance: - :exclude-members: - valid_option_dict, - validate_options, - validate_additional_options diff --git a/docs/source/geneformer.tokenizer.rst b/docs/source/geneformer.tokenizer.rst deleted file mode 100644 index b8150d3312ff7eddd56183604e952aa3b06798bc..0000000000000000000000000000000000000000 --- a/docs/source/geneformer.tokenizer.rst +++ /dev/null @@ -1,15 +0,0 @@ -geneformer.tokenizer -==================== - -.. automodule:: geneformer.tokenizer - :members: - :undoc-members: - :show-inheritance: - :exclude-members: - create_dataset, - tokenize_anndata, - tokenize_files, - tokenize_loom, - rank_genes, - tokenize_cell, - sum_ensembl_ids diff --git a/docs/source/getstarted.rst b/docs/source/getstarted.rst deleted file mode 100644 index fb0d853bc29cb961a844add7b0dede9891ce8689..0000000000000000000000000000000000000000 --- a/docs/source/getstarted.rst +++ /dev/null @@ -1,36 +0,0 @@ -Getting Started -=============== - -Installation ------------- - -Geneformer installation instructions. - -Make sure you have git-lfs installed (https://git-lfs.com). - -.. 
code-block:: bash - - git lfs install - git clone https://huggingface.co/ctheodoris/Geneformer - cd Geneformer - pip install . - - -Tutorials ---------- - -| See `examples `_ for: -| - tokenizing transcriptomes -| - pretraining -| - hyperparameter tuning -| - fine-tuning -| - extracting and plotting cell embeddings -| - in silico perturbation - -Please note that the fine-tuning examples are meant to be generally applicable and the input datasets and labels will vary dependent on the downstream task. Example input files for a few of the downstream tasks demonstrated in the manuscript are located within the `example_input_files directory `_ in the dataset repository, but these only represent a few example fine-tuning applications. - - -Tips ----- - -Please note that GPU resources are required for efficient usage of Geneformer. Additionally, we strongly recommend tuning hyperparameters for each downstream fine-tuning application as this can significantly boost predictive potential in the downstream task (e.g. max learning rate, learning schedule, number of layers to freeze, etc.). diff --git a/docs/source/index.rst b/docs/source/index.rst deleted file mode 100644 index 102a5861bc63fccb4ba295afd437fc461dda0d42..0000000000000000000000000000000000000000 --- a/docs/source/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -Geneformer -========== - -Geneformer is a foundation transformer model pretrained on a large-scale corpus of single cell transcriptomes to enable context-aware predictions in network biology. - -See `our manuscript `_ for details. - -Table of Contents ------------------ - -.. 
toctree:: - :maxdepth: 2 - - about - getstarted - api diff --git a/examples/cell_classification.ipynb b/examples/cell_classification.ipynb index 321187b9959abe460c6efc34996d6db0cf3488ed..9f087fd63d5b26351d67a093fd3a5409e18392e7 100644 --- a/examples/cell_classification.ipynb +++ b/examples/cell_classification.ipynb @@ -2,191 +2,583 @@ "cells": [ { "cell_type": "markdown", - "id": "65a2b29a-c678-4874-a1bf-5af3a7d00ed9", + "id": "234afff3", "metadata": {}, "source": [ - "## Geneformer Fine-Tuning for Classification of Cardiomyopathy Disease States" + "## Geneformer Fine-Tuning for Cell Annotation Application" ] }, { - "cell_type": "markdown", - "id": "1792e51c-86c3-406f-be5a-273c4e4aec20", + "cell_type": "code", + "execution_count": 2, + "id": "1cbe6178-ea4d-478a-80a8-65ffaa4c1820", "metadata": {}, + "outputs": [], "source": [ - "### Please note that, as usual with deep learning models, we **highly** recommend tuning learning hyperparameters for all fine-tuning applications as this can significantly improve model performance. Example below uses previously optimized hyperparameters, but one can optimize hyperparameters with the argument n_hyperopt_trials=n in cc.validate() where n>0 and represents the number of trials for hyperparameter optimization." 
+ "import os\n", + "GPU_NUMBER = [0]\n", + "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join([str(s) for s in GPU_NUMBER])\n", + "os.environ[\"NCCL_DEBUG\"] = \"INFO\"" ] }, { - "cell_type": "markdown", - "id": "3dad7564-b464-4d37-9188-17c0ae4ae59f", + "cell_type": "code", + "execution_count": 3, + "id": "a9885d9f-00ac-4c84-b6a3-b7b648a90f0f", "metadata": {}, + "outputs": [], "source": [ - "### Train cell classifier with 70% of data (with hyperparameters previously optimized based on 15% of data as validation set) and evaluate on held-out test set of 15% of data" + "# imports\n", + "from collections import Counter\n", + "import datetime\n", + "import pickle\n", + "import subprocess\n", + "import seaborn as sns; sns.set()\n", + "from datasets import load_from_disk\n", + "from sklearn.metrics import accuracy_score, f1_score\n", + "from transformers import BertForSequenceClassification\n", + "from transformers import Trainer\n", + "from transformers.training_args import TrainingArguments\n", + "\n", + "from geneformer import DataCollatorForCellClassification" ] }, { "cell_type": "markdown", - "id": "9027e51e-7830-4ab8-aebf-b9779b3ea2c1", + "id": "68bd3b98-5409-4105-b7af-f1ff64ea6a72", "metadata": {}, "source": [ - "### Fine-tune the model for cell state classification" + "## Prepare training and evaluation datasets" ] }, { "cell_type": "code", - "execution_count": 2, - "id": "efe3b79b-aa8f-416c-9755-7f9299d6a81e", + "execution_count": 15, + "id": "5735f1b7-7595-4a02-be17-2c5b970ad81a", "metadata": {}, "outputs": [], "source": [ - "import datetime\n", - "from geneformer import Classifier\n", + "# load cell type dataset (includes all tissues)\n", + "train_dataset=load_from_disk(\"/path/to/cell_type_train_data.dataset\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a4297a02-4c4c-434c-ae55-3387a0b239b5", + "metadata": { + "collapsed": true, + "jupyter": { + "outputs_hidden": true + }, + "tags": [] + }, + "outputs": [], + "source": [ + 
"dataset_list = []\n", + "evalset_list = []\n", + "organ_list = []\n", + "target_dict_list = []\n", "\n", - "current_date = datetime.datetime.now()\n", - "datestamp = f\"{str(current_date.year)[-2:]}{current_date.month:02d}{current_date.day:02d}{current_date.hour:02d}{current_date.minute:02d}{current_date.second:02d}\"\n", - "datestamp_min = f\"{str(current_date.year)[-2:]}{current_date.month:02d}{current_date.day:02d}\"\n", + "for organ in Counter(train_dataset[\"organ_major\"]).keys():\n", + " # collect list of tissues for fine-tuning (immune and bone marrow are included together)\n", + " if organ in [\"bone_marrow\"]: \n", + " continue\n", + " elif organ==\"immune\":\n", + " organ_ids = [\"immune\",\"bone_marrow\"]\n", + " organ_list += [\"immune\"]\n", + " else:\n", + " organ_ids = [organ]\n", + " organ_list += [organ]\n", + " \n", + " print(organ)\n", + " \n", + " # filter datasets for given organ\n", + " def if_organ(example):\n", + " return example[\"organ_major\"] in organ_ids\n", + " trainset_organ = train_dataset.filter(if_organ, num_proc=16)\n", + " \n", + " # per scDeepsort published method, drop cell types representing <0.5% of cells\n", + " celltype_counter = Counter(trainset_organ[\"cell_type\"])\n", + " total_cells = sum(celltype_counter.values())\n", + " cells_to_keep = [k for k,v in celltype_counter.items() if v>(0.005*total_cells)]\n", + " def if_not_rare_celltype(example):\n", + " return example[\"cell_type\"] in cells_to_keep\n", + " trainset_organ_subset = trainset_organ.filter(if_not_rare_celltype, num_proc=16)\n", + " \n", + " # shuffle datasets and rename columns\n", + " trainset_organ_shuffled = trainset_organ_subset.shuffle(seed=42)\n", + " trainset_organ_shuffled = trainset_organ_shuffled.rename_column(\"cell_type\",\"label\")\n", + " trainset_organ_shuffled = trainset_organ_shuffled.remove_columns(\"organ_major\")\n", + " \n", + " # create dictionary of cell types : label ids\n", + " target_names = 
list(Counter(trainset_organ_shuffled[\"label\"]).keys())\n", + " target_name_id_dict = dict(zip(target_names,[i for i in range(len(target_names))]))\n", + " target_dict_list += [target_name_id_dict]\n", + " \n", + " # change labels to numerical ids\n", + " def classes_to_ids(example):\n", + " example[\"label\"] = target_name_id_dict[example[\"label\"]]\n", + " return example\n", + " labeled_trainset = trainset_organ_shuffled.map(classes_to_ids, num_proc=16)\n", + " \n", + " # create 80/20 train/eval splits\n", + " labeled_train_split = labeled_trainset.select([i for i in range(0,round(len(labeled_trainset)*0.8))])\n", + " labeled_eval_split = labeled_trainset.select([i for i in range(round(len(labeled_trainset)*0.8),len(labeled_trainset))])\n", + " \n", + " # filter dataset for cell types in corresponding training set\n", + " trained_labels = list(Counter(labeled_train_split[\"label\"]).keys())\n", + " def if_trained_label(example):\n", + " return example[\"label\"] in trained_labels\n", + " labeled_eval_split_subset = labeled_eval_split.filter(if_trained_label, num_proc=16)\n", "\n", - "output_prefix = \"cm_classifier_test\"\n", - "output_dir = f\"/path/to/output_dir/{datestamp}\"\n", - "!mkdir $output_dir" + " dataset_list += [labeled_train_split]\n", + " evalset_list += [labeled_eval_split_subset]" ] }, { "cell_type": "code", - "execution_count": 3, - "id": "f070ab20-1b18-4941-a5c7-89e23b519261", + "execution_count": 20, + "id": "83e20521-597a-4c54-897b-c4d42ea622c2", + "metadata": {}, + "outputs": [], + "source": [ + "trainset_dict = dict(zip(organ_list,dataset_list))\n", + "traintargetdict_dict = dict(zip(organ_list,target_dict_list))\n", + "\n", + "evalset_dict = dict(zip(organ_list,evalset_list))" + ] + }, + { + "cell_type": "markdown", + "id": "10eb110d-ba43-4efc-bc43-1815d6912647", + "metadata": {}, + "source": [ + "## Fine-Tune With Cell Classification Learning Objective and Quantify Predictive Performance" + ] + }, + { + "cell_type": "code", + 
"execution_count": 18, + "id": "cd7b1cfb-f5cb-460e-ae77-769522ece054", + "metadata": {}, + "outputs": [], + "source": [ + "def compute_metrics(pred):\n", + " labels = pred.label_ids\n", + " preds = pred.predictions.argmax(-1)\n", + " # calculate accuracy and macro f1 using sklearn's function\n", + " acc = accuracy_score(labels, preds)\n", + " macro_f1 = f1_score(labels, preds, average='macro')\n", + " return {\n", + " 'accuracy': acc,\n", + " 'macro_f1': macro_f1\n", + " }" + ] + }, + { + "cell_type": "markdown", + "id": "beaab7a4-cc13-4e8f-b137-ed18ff7b633c", + "metadata": {}, + "source": [ + "### Please note that, as usual with deep learning models, we **highly** recommend tuning learning hyperparameters for all fine-tuning applications as this can significantly improve model performance. Example hyperparameters are defined below, but please see the \"hyperparam_optimiz_for_disease_classifier\" script for an example of how to tune hyperparameters for downstream applications." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "d24e1ab7-0131-44bd-b458-1ce5ba31853e", "metadata": {}, "outputs": [], "source": [ - "filter_data_dict={\"cell_type\":[\"Cardiomyocyte1\",\"Cardiomyocyte2\",\"Cardiomyocyte3\"]}\n", - "training_args = {\n", - " \"num_train_epochs\": 0.9,\n", - " \"learning_rate\": 0.000804,\n", - " \"lr_scheduler_type\": \"polynomial\",\n", - " \"warmup_steps\": 1812,\n", - " \"weight_decay\":0.258828,\n", - " \"per_device_train_batch_size\": 12,\n", - " \"seed\": 73,\n", - "}\n", + "# set model parameters\n", + "# max input size\n", + "max_input_size = 2 ** 11 # 2048\n", "\n", - "# OF NOTE: token_dictionary_file must be set to the gc-30M token dictionary if using a 30M series model\n", - "# (otherwise the Classifier will use the current default model dictionary)\n", - "# 30M token dictionary: https://huggingface.co/ctheodoris/Geneformer/blob/main/geneformer/gene_dictionaries_30m/token_dictionary_gc30M.pkl\n", - "cc = 
Classifier(classifier=\"cell\",\n", - " cell_state_dict = {\"state_key\": \"disease\", \"states\": \"all\"},\n", - " filter_data=filter_data_dict,\n", - " training_args=training_args,\n", - " max_ncells=None,\n", - " freeze_layers = 2,\n", - " num_crossval_splits = 1,\n", - " forward_batch_size=200,\n", - " nproc=16)" + "# set training hyperparameters\n", + "# max learning rate\n", + "max_lr = 5e-5\n", + "# how many pretrained layers to freeze\n", + "freeze_layers = 0\n", + "# number gpus\n", + "num_gpus = 1\n", + "# number cpu cores\n", + "num_proc = 16\n", + "# batch size for training and eval\n", + "geneformer_batch_size = 12\n", + "# learning schedule\n", + "lr_schedule_fn = \"linear\"\n", + "# warmup steps\n", + "warmup_steps = 500\n", + "# number of epochs\n", + "epochs = 10\n", + "# optimizer\n", + "optimizer = \"adamw\"" ] }, { "cell_type": "code", - "execution_count": 4, - "id": "0bced2e8-0a49-418e-a7f9-3981be256bd6", + "execution_count": 20, + "id": "05164c24-5fbf-4372-b26c-a43f3777a88d", "metadata": {}, "outputs": [ { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "9c409ca656ed4cb0b280d95e326c1bc7", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Saving the dataset (0/3 shards): 0%| | 0/115367 [00:00:54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] }, { "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "facb7207b57948aebb3f8681346e17d4", - "version_major": 2, - "version_minor": 0 - }, + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [10280/10280 13:33, Epoch 10/10]\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
EpochTraining LossValidation LossAccuracyMacro F1Weighted F1
10.0870000.0680670.9854040.9568390.985483
20.0444000.0752890.9850790.9550690.984898
30.0667000.0787030.9837820.9532400.983959
40.0374000.0571320.9899450.9706190.989883
50.0250000.0616440.9883230.9611260.988211
60.0224000.0653230.9892960.9697370.989362
70.0186000.0637100.9896200.9694360.989579
80.0398000.0659190.9899450.9680650.989802
90.0302000.0613590.9902690.9717000.990314
100.0134000.0591810.9915670.9745990.991552

" + ], "text/plain": [ - "Saving the dataset (0/1 shards): 0%| | 0/17228 [00:00" ] }, "metadata": {}, "output_type": "display_data" - } - ], - "source": [ - "# previously balanced splits with prepare_data and validate functions\n", - "# argument attr_to_split set to \"individual\" and attr_to_balance set to [\"disease\",\"lvef\",\"age\",\"sex\",\"length\"]\n", - "train_ids = [\"1447\", \"1600\", \"1462\", \"1558\", \"1300\", \"1508\", \"1358\", \"1678\", \"1561\", \"1304\", \"1610\", \"1430\", \"1472\", \"1707\", \"1726\", \"1504\", \"1425\", \"1617\", \"1631\", \"1735\", \"1582\", \"1722\", \"1622\", \"1630\", \"1290\", \"1479\", \"1371\", \"1549\", \"1515\"]\n", - "eval_ids = [\"1422\", \"1510\", \"1539\", \"1606\", \"1702\"]\n", - "test_ids = [\"1437\", \"1516\", \"1602\", \"1685\", \"1718\"]\n", - "\n", - "train_test_id_split_dict = {\"attr_key\": \"individual\",\n", - " \"train\": train_ids+eval_ids,\n", - " \"test\": test_ids}\n", - "\n", - "# Example input_data_file for 30M model: https://huggingface.co/datasets/ctheodoris/Genecorpus-30M/tree/main/example_input_files/cell_classification/disease_classification/human_dcm_hcm_nf.dataset\n", - "cc.prepare_data(input_data_file=\"/path/to/human_dcm_hcm_nf_2048_w_length.dataset\",\n", - " output_directory=output_dir,\n", - " output_prefix=output_prefix,\n", - " split_id_dict=train_test_id_split_dict)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "73fe8b29-dd8f-4bf8-82c1-53196d73ed49", - "metadata": {}, - "outputs": [ + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or 
sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use 
sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, { "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "691e875524e441bca22b790a0f4a2a35", - "version_major": 2, - "version_minor": 0 - }, + "text/html": [ + "\n", + "

\n", + " \n", + " \n", + " [257/257 00:07]\n", + "
\n", + " " + ], "text/plain": [ - " 0%| | 0/1 [00:00" ] }, "metadata": {}, "output_type": "display_data" }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ were not used when initializing BertForSequenceClassification: ['cls.predictions.transform.LayerNorm.weight', 'cls.predictions.decoder.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight']\n", + "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ and are newly initialized: ['bert.pooler.dense.weight', 'bert.pooler.dense.bias', 'classifier.weight', 'classifier.bias']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "****** Validation split: 1/1 ******\n", - "\n" + "kidney\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or 
sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" ] }, { "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "c2c4f53aa71a49b89c32c8ba573b0b0c", - "version_major": 2, - "version_minor": 0 - }, + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [29340/29340 45:43, Epoch 10/10]\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
EpochTraining LossValidation LossAccuracyMacro F1Weighted F1
10.3269000.2991930.9125000.8230670.909627
20.2242000.2395800.9264770.8502370.923902
30.2216000.2428100.9302270.8785530.930349
40.1661000.2641780.9334090.8847590.933031
50.1441000.2792820.9350000.8876590.934987
60.1128000.3076470.9359090.8892390.935365
70.0846000.3263990.9328410.8924470.933191
80.0683000.3326260.9365910.8916290.936354
90.0655000.3481740.9352270.8894840.935040
100.0461000.3553500.9350000.8945780.934971

" + ], "text/plain": [ - "Filter (num_proc=16): 0%| | 0/115367 [00:00" ] }, "metadata": {}, "output_type": "display_data" }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended 
to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, { "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "adf76144219747558bf39b7e776a68b3", - "version_major": 2, - "version_minor": 0 - }, + "text/html": [ + "\n", + "

\n", + " \n", + " \n", + " [734/734 00:27]\n", + "
\n", + " " + ], "text/plain": [ - "Filter (num_proc=16): 0%| | 0/115367 [00:00" ] }, "metadata": {}, @@ -196,10 +588,25 @@ "name": "stderr", "output_type": "stream", "text": [ - "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at /gladstone/theodoris/home/ctheodoris/Geneformer and are newly initialized: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight', 'classifier.bias', 'classifier.weight']\n", - "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n", - "Detected kernel version 4.18.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n", - "/gladstone/theodoris/home/ctheodoris/Geneformer/geneformer/collator_for_classification.py:581: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + "Some weights of the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ were not used when initializing BertForSequenceClassification: ['cls.predictions.transform.LayerNorm.weight', 'cls.predictions.decoder.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight']\n", + "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ and are newly initialized: ['bert.pooler.dense.weight', 'bert.pooler.dense.bias', 'classifier.weight', 'classifier.bias']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "lung\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" ] }, @@ -209,26 +616,100 @@ "\n", "
\n", " \n", - " \n", - " [7020/7020 26:02, Epoch 0/1]\n", + " \n", + " [21750/21750 30:32, Epoch 10/10]\n", "
\n", " \n", " \n", - " \n", + " \n", " \n", " \n", " \n", " \n", " \n", + " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
EpochTraining LossValidation LossAccuracyMacro F1Weighted F1
00.1424000.3891660.8897970.69307410.3376000.3415230.9063600.7599790.899310
20.2119000.2589540.9284290.8355340.925903
30.2086000.2820810.9304210.8427860.928013
40.1444000.2530470.9354790.8717120.935234
50.1092000.2688330.9394640.8761730.938870
60.1327000.2826970.9405360.8832710.940191
70.0818000.2958640.9408430.8842010.940170
80.0359000.3066000.9419160.8847770.941578
90.0508000.3116770.9405360.8834370.940294
100.0358000.3153600.9408430.8835510.940612

" @@ -244,193 +725,1201 @@ "name": "stderr", "output_type": "stream", "text": [ - "/gladstone/theodoris/home/ctheodoris/Geneformer/geneformer/collator_for_classification.py:581: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather 
than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" ] }, { "data": { - "text/html": [], + "text/html": [ + "\n", + "

\n", + " \n", + " \n", + " [544/544 00:19]\n", + "
\n", + " " + ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" - } - ], - "source": [ - "train_valid_id_split_dict = {\"attr_key\": \"individual\",\n", - " \"train\": train_ids,\n", - " \"eval\": eval_ids}\n", - "\n", - "# Example 6 layer 30M Geneformer model: https://huggingface.co/ctheodoris/Geneformer/blob/main/gf-6L-30M-i2048/model.safetensors\n", - "all_metrics = cc.validate(model_directory=\"/path/to/Geneformer\",\n", - " prepared_input_data_file=f\"{output_dir}/{output_prefix}_labeled_train.dataset\",\n", - " id_class_dict_file=f\"{output_dir}/{output_prefix}_id_class_dict.pkl\",\n", - " output_directory=output_dir,\n", - " output_prefix=output_prefix,\n", - " split_id_dict=train_valid_id_split_dict)\n", - " # to optimize hyperparameters, set n_hyperopt_trials=100 (or alternative desired # of trials)" - ] - }, - { - "cell_type": "markdown", - "id": "6eca8ab4-6f4d-4dd6-9b90-edfb5cc7417c", - "metadata": {}, - "source": [ - "### Evaluate the model" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "f580021e-2b70-4ebc-943c-2bfe6177e1b5", - "metadata": {}, - "outputs": [ + }, { "name": "stderr", "output_type": "stream", "text": [ - "Hyperparameter tuning is highly recommended for optimal results. No training_args provided; using default hyperparameters.\n" + "Some weights of the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ were not used when initializing BertForSequenceClassification: ['cls.predictions.transform.LayerNorm.weight', 'cls.predictions.decoder.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight']\n", + "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ and are newly initialized: ['bert.pooler.dense.weight', 'bert.pooler.dense.bias', 'classifier.weight', 'classifier.bias']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" ] - } - ], - "source": [ - "cc = Classifier(classifier=\"cell\",\n", - " cell_state_dict = {\"state_key\": \"disease\", \"states\": \"all\"},\n", - " forward_batch_size=200,\n", - " nproc=16)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "b05398b4-bca1-44b0-8160-637489f16646", - "metadata": {}, - "outputs": [ + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "brain\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [8880/8880 11:14, Epoch 10/10]\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
EpochTraining LossValidation LossAccuracyMacro F1Weighted F1
10.1631000.1566400.9703450.7364550.960714
20.1498000.1348970.9688440.7471140.960726
30.1056000.1153540.9722220.7752710.964932
40.0869000.2079180.9688440.7079270.958257
50.0564000.1065480.9740990.8398380.971611
60.0376000.1174370.9782280.8565780.975665
70.0305000.1278850.9744740.8562960.973531
80.0193000.1432030.9778530.8593620.975776
90.0074000.1537580.9725980.8528350.972314
100.0172000.1539110.9759760.8581960.974498

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use 
sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "

\n", + " \n", + " \n", + " [222/222 00:04]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ were not used when initializing BertForSequenceClassification: ['cls.predictions.transform.LayerNorm.weight', 'cls.predictions.decoder.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight']\n", + "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ and are newly initialized: ['bert.pooler.dense.weight', 'bert.pooler.dense.bias', 'classifier.weight', 'classifier.bias']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "placenta\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than 
torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [6180/6180 10:28, Epoch 10/10]\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
EpochTraining LossValidation LossAccuracyMacro F1Weighted F1
10.1287000.1251750.9606260.9357520.959463
20.0640000.2156070.9514560.9205790.949828
30.0513000.2030440.9611650.9341950.959470
40.0453000.1157010.9789640.9663870.978788
50.0482000.1494840.9735710.9589270.973305
60.0409000.1343390.9789640.9674660.978899
70.0016000.1599000.9784250.9667130.978211
80.0024000.1253510.9795040.9680640.979428
90.0094000.1201320.9805830.9696310.980506
100.0015000.1378640.9789640.9671800.978825

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use 
sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "

\n", + " \n", + " \n", + " [155/155 00:05]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ were not used when initializing BertForSequenceClassification: ['cls.predictions.transform.LayerNorm.weight', 'cls.predictions.decoder.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight']\n", + "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ and are newly initialized: ['bert.pooler.dense.weight', 'bert.pooler.dense.bias', 'classifier.weight', 'classifier.bias']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "immune\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than 
torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [17140/17140 22:02, Epoch 10/10]\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
EpochTraining LossValidation LossAccuracyMacro F1Weighted F1
10.2889000.2315820.9367700.8684050.934816
20.2032000.2062920.9373540.8886610.939555
30.1835000.1958110.9449420.8911490.944008
40.1510000.2195810.9476650.9065780.947093
50.0900000.2471200.9466930.8988120.945808
60.0604000.2496620.9484440.9050140.947975
70.0713000.2727670.9494160.9115140.949748
80.0526000.3050510.9453310.9023480.944987
90.0269000.2941350.9486380.9040580.948296
100.0345000.2920290.9501950.9085470.949753

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use 
sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "

\n", + " \n", + " \n", + " [429/429 00:13]\n", + "
\n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ were not used when initializing BertForSequenceClassification: ['cls.predictions.transform.LayerNorm.weight', 'cls.predictions.decoder.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight']\n", + "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ and are newly initialized: ['bert.pooler.dense.weight', 'bert.pooler.dense.bias', 'classifier.weight', 'classifier.bias']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "large_intestine\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than 
torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, { "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "8e93a706295b49a1996b275eba3e9f31", - "version_major": 2, - "version_minor": 0 - }, + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [33070/33070 43:02, Epoch 10/10]\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
EpochTraining LossValidation LossAccuracyMacro F1Weighted F1
10.3062000.3124310.9082660.7862420.900768
20.2239000.2480960.9251010.8412510.920987
30.1736000.2599970.9259070.8503480.926290
40.1629000.2823060.9250000.8736690.925531
50.1434000.2544940.9379030.8767490.937836
60.1045000.2899420.9346770.8753330.934339
70.0803000.3139140.9354840.8772710.934986
80.0635000.3398680.9362900.8822670.936187
90.0425000.3457840.9389110.8829630.938682
100.0389000.3521990.9395160.8855090.939497

" + ], "text/plain": [ - " 0%| | 0/87 [00:00" ] }, "metadata": {}, "output_type": "display_data" - } - ], - "source": [ - "all_metrics_test = cc.evaluate_saved_model(\n", - " model_directory=f\"{output_dir}/{datestamp_min}_geneformer_cellClassifier_{output_prefix}/ksplit1/\",\n", - " id_class_dict_file=f\"{output_dir}/{output_prefix}_id_class_dict.pkl\",\n", - " test_data_file=f\"{output_dir}/{output_prefix}_labeled_test.dataset\",\n", - " output_directory=output_dir,\n", - " output_prefix=output_prefix,\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "b45404e4-87cc-421d-84f5-1f9cbc09aa31", - "metadata": {}, - "outputs": [ + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() 
or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, { "data": { + "text/html": [ + "\n", + "

\n", + " \n", + " \n", + " [827/827 00:26]\n", + "
\n", + " " + ], "text/plain": [ - "
" + "" ] }, "metadata": {}, "output_type": "display_data" }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ were not used when initializing BertForSequenceClassification: ['cls.predictions.transform.LayerNorm.weight', 'cls.predictions.decoder.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight']\n", + "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ and are newly initialized: ['bert.pooler.dense.weight', 'bert.pooler.dense.bias', 'classifier.weight', 'classifier.bias']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "pancreas\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, 
dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjUAAAHHCAYAAABHp6kXAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB20klEQVR4nO3deVzM+R8H8Nd0TOUYqUQhJZWEykpy5JYldxaLjW0tkmPJop91Rjl2kTOWRXbXLee6b+teR7kld0jXdB8z8/ujbdaYSjUlTa/nPub3M5/v9/P5fr59O97zOQUymUwGIiIiojJOo7QrQERERFQcGNQQERGRWmBQQ0RERGqBQQ0RERGpBQY1REREpBYY1BAREZFaYFBDREREakGrtCtA+bt+/TpkMhm0tbVLuypERFQEmZmZEAgEcHR0LLFr3L9/HxkZGcVSllAohI2NTbGU9akxqPnMyWQyZGZJ8DI6qbSrQp9A7Rr6pV0F+qQEpV0B+gQ+xRq3GRkZSElNw9vYZJXKMTaoWEw1Kh0Maj5z2traeBmdhK/8j5V2VegTeLRvWmlXgT4hLQ0GNeVBVOQ9aH6CwR5vY5Px1dQdKpWxLcAD5jV1i6lGnx6DGiIiInUhKN9DZRnUEBERqQtB+W79K98hHREREakNttQQERGpC3Y/ERERkVoo591PDGqIiIjUgUCgektNGQ+Kync7FREREakNttQQERGpizLe0qIqBjVERETqopwPFC7fd09ERERqgy01RERE6oLdT0RERFT2FcPspzK+ySq7n4iIiEgtsKWGiIhIXbD7iYiIiNQCZz8RERERlX1sqSEiIlIX7H4iIiKiMk+AYtj7qVhqUmoY1BAREakFTunmmBoiIiJSC2ypISIiUhcaZbulRVUMaoiIiNQFp3QTERERlX1sqSEiIlIXnNJNREREaoHdT0RERERlH1tqiIiI1AW7n4iIiKjMExTD4ntlPChi9xMRERGpBQY1RERE6kIgUO2losjISHh5ecHBwQEuLi7w9/dHWlraR/OlpKRg0aJF6NixI+zt7dG5c2csW7YMGRkZhbo+u5+IiIjURSnOfhKLxfD09ISpqSmCgoIQGxuLgIAAxMfHY9GiRfnmnTlzJo4dO4YffvgBVlZWuHXrFoKCgpCQkIBp06YVuA4MaoiIiNRFKY6J2bJlC8RiMUJDQ2FgYAAA0NTUhK+vL0aNGgVLS8tc82VlZeHQoUP47rvvMGTIEABA8+bN8erVKxw8eLBQQQ27n4iIiEhlZ86cgYuLizygAQA3NzcIhUKcPn06z3wymQwSiQSVK1dWSBeJRJDJZIWqA1tqiIiI1EIxzH6CAFFRURg/fnyeZxw/fjzX9IiICPTt21chTSgUwszMDBEREXmWp62tjT59+iAkJARNmjRBvXr1EBYWhm3btmHw4MGFqj2DGiIiInVRit1PYrEYIpFIKV0kEiEhISHfvDNnzsSMGTPw1VdfydOGDBkCHx+fQtWBQQ0RERHJmZiY5NkaUxQymQyCjwRbixYtwqlTpzBnzhxYWFjg9u3bCAoKgkgkwtixYwt8LQY1RERE6qIUZz+JRCKIxWKl9MTExDwHCQPAgwcPsH79eqxcuRIdOnQAADg5OUEgEGDBggUYNGgQDA0NC1QHDhQmIiJSBwJkBzUqvYp+eUtLS6WxMxkZGXj27Fm+Qc2jR48AALa2tgrptra2yMrKwsuXLwtcBwY1REREpDJXV1dcvHgRcXFx8rSjR48iIyMDbdq0yTNfzZo1AQC3b99WSA8PDwcA1KpVq8B1YPcTERGRuijFgcIDBgzA5s2b4e3tDW9vb8TExCAwMBDdu3dXaKnx8/NDaGgo7ty5AwBo2LAhGjdujBkzZuDdu3ewsLBAWFgYVq5cia5duypMEf8YBjVERERqoX
imdBeVSCTCxo0b4e/vjzFjxkBXVxfu7u7w9fVVOE8qlUIikcjfa2pqYvXq1Vi6dCnWrl2Ld+/ewcTEBIMHD8bIkSMLVQcGNURERFQsLCwssG7dunzPCQwMRGBgoEKaoaEhZs+erfL1GdQQERGpi1LsfvocMKghIiJSF6U4pftzwKCGiIhIXZTzlpryHdIRERGR2mBLDRERkZr42HYE6o4tNSUoPj4eo0ePhpOTE2xsbHDs2LHSrhIREakxgUCg0qusY0tNCVq3bh0uXbqE+fPnw9DQEBYWFqVdJSIiIrXFoKYERUREwMbGRr5BFxERUYkRQKW9m+RllGHsfiqiKVOmwN3dHZcuXUKvXr3g4OAADw8P+V4VNjY2OH78OK5evQobGxvY2NiUco2JiEi9qdb1lN39VLajGgY1KoiOjoa/vz+8vLywePFipKWlwcfHB5mZmdi6dSuaNGmCBg0aYOvWrdi6dWtpV5eIiEitsftJBQkJCdi8eTOsrKwAADo6Ohg2bBhu3ryJpk2bQiQSQUtLCw4ODqVbUSIiKhfUYbCvKhjUqMDY2Fge0ACQ70L65s2b0qrSZ6OirhD/8+qAXm3tUFWkh4fP3mHJH2ex62T4R/O2crDAhEGuaGhZHXq62nj6Kg6bDl7Dr6GXIZXKcs2jK9TC2V+9Ua+2EX5adRjLt50v7lsiAMmp6Vi87iAOnrqJeHEKLM2MMeLrDnBv7/jRvDFxiZgfvB8nL9xBanoG6luaYsK3X6LFF9YK56VnZGHTrjPYdfgqXryORQU9IeysasFnSCc0afjfYPsXr2PRdqB/rtda8tOQAtWJ8packo5F6w7iwMkbiE/Mftajvu6AHh2afDTvu7hEBKzehxMX7iA1LQO2lqbw/a4rWr73rJ9HxaL1gDl5luHarD42LRwBAHj1Ng4zl+7G3YiXeBeXBC1NDdQ2MUT/bs4Y1KMFtLQ0Vb9hNcGghopMJBIpvNfW1gYApKenl0Z1PiubZg9Ak/o1MWvtUTx6HgOPDo2wbvpX0NAQYMfxsDzztWlSFzsXfIO/bz3FuJ/3IiU1A1+2rI/5Y7rBwtQAU5f/lWs+v287oIKusKRuh/7lPf03hN17jknDu8G8djXsO/4Pxs8JgVQqRY+OX+SZLz0jC0MmroI4KQ3TfHrBsGolbA49j28nr8HGRSPh7FBPfu7/Fm3D3uPXMPLrDnBxtEJ8YgqC/ziOr8evwNZlY2BvW0eh7G96t0L3jop/aM1rViveGy+HRv60HjfvPcfkEe6wqFUNe4/9g7GzQyCTytCzU/7PetAPqyBOSsWMMb1hqF8Jm0LPwXNSMDb/MgrN/33WxoYi7Fo5Tin/kXNhWP3HCbi1biRPS0nNQKWKOhjzTWfUrF4VGZkSnLp4BzOW7sKdRy8x/8cBxf8FoDKJQQ0Vu07OVmjvVA/fzdmOnSeyA5hzNyJRu7o+Zo1ww66T4Xm2uHzdxRGZEgkG+G1GSlomAOD0P49Rr7YRvnZzzDWoaVK/Jr7v7Yzv5+7Axln85VZSTl28g/NXH2DxtMHo/u+ndRdHK7x8E4f5wfvQrZ0jNDVzH6a3/eAlPIh8jW3Lx6KJnTkAoLljPbh7LcKC4P3YuWo8gOw/iPuO/4PuHZpggldXef4vGlqghcdM7D32j1JQY1q9KhwbmBf7/ZZnJy/ewdmrD7D0pyHo+W/A2KKJFV6+icW81fvg3j7vZ73twEXcj4zCzhXj8EVDcwCAi2M9fOm1EAGr92HP6h8AADpCLfn3wvsWrDkAPV2hQotQvTrV8YvfIIXz2jW3xbv4JOw8dAWzx3tAR8g/ZwBbajhQmIpdt1a2SExJR+ip2wrpfxy6DtNqIjS1rZVn3swsCTIyJUhNz1JIT0hKRVpGltL52lqaWP5jL/y65zKu339VPDdAuTpyLgwV9XTwZVt7hXSPLs3w5p0YN+8+zTPv0XNhqFvbWOGPmJamJnp2+gI37z3D6+h4AICGhgAaGgJUrqirkL
9SBR1oaAj4h+sTOXwm+1l3+/BZf+mMN+8ScCOfZ334bBjqmhnLAxoA0NLSRK9OTXHz7n/POjdPX77DpZsR6NbWQel7IDeG+pWgoSGApkb5/kOuQKDiq4xjUEPFztaiOh48jYZEKlVIv/349b/HjfPM+9u+qxBqa2L+mK6oYVgZooq66N/JHu6tbBG05ZzS+T9+0xYVdIWYt/548d4EKXkQ+RqWdYyhpak4fsGmrqn8eN55o2BT10Qpvf6/eR8+yR6Hpq2liUE9W2D34as4ei4MiclpePE6Fv9btA2VK+riK/fmSmUE/3ECtp0moWGXyeg/ZhmOnf/4uC3K3/3IKNSrU11prIqtZfYzvP84Kt+8trk865y8D57k/X2y7eAlyGQyDHB3zvW4TCZDVpYECYkp2HfiOnYcuozvvmrHMTXv4YrCRMXMQKSHJ1FxSulx4tR/j1fIM++1uy/Qc8IG/DajP4b3zv7FliWRYPbaY1ix/W+Fcxta1sDYAS0x8H9/ICUtE4ZVivEmSEm8OAW1TQyU0vX/fZ5x4uR88+rn8tyr/JsW/17e/43uhUoV9TB6xgZ5N6Vp9aoI+cVbYayMUFsL/bs1R8um1jA2FOHVmzhs2n0OI6etx1zfr9C/m3IARAUTL05BbVNDpfT/nnVKvnmr5PasK1fMzpuQe16JRIqdh6/A0swYTRvVzfWcVX8cx4I1BwBk//EePbgjfL/rmuu5VD4xqCmiwMBApTQDAwPcv39f/j44OPhTVunzkvuQmexD+RyztzZByOyBuHb3BSb8shfJaZlwdbTA/77tAB2hFhaFnAYAaGpoYPmPvbD7ZDhOXHlUzJWnvOT3Se5jn/IKmnfl5mNYt+0Uxnq6oWnjukhKTkNI6Dl4+q7GhoUjYGeV3X1pbCjCXN+v/iukEfBlWwf09V6CRWsOoG8XJ6VWJSq4/J7mxz7QC/LJnVfe05fv4nV0AvxG9cgzr0eXZmj1hTXiE1Pw9z8PsWbLCSQmpWLW+L75V6icEAhUH1NT1htrGNRQsYsVp6KqSE8pPSctLjHvT3kLx7kjOi4Jg6f/Kf+Ufu5GJKQyGaZ4tsP2Y7fwNCoOozxcYG5aFcNmbYPo3753UUUdANkDEEUVdZGUmp7ngGQqPH1RBYUWlRzx/35q16+cdwucvqgC4hKU8yb8m7fKv3kfPX2DJb8dwuQR7viufzv5eW2cbdFl6HzMW7kHvy8ened1tLU00a2dAxauOYAnL96hXp3qBbs5UpD9rJV/Tgv6rHP7PklITJYfz83WA5egraWJPm5N8yzb2FAEY8PsWaeuTvVRpXIFzA/ej35dndHQOu+xeuWJOnQhqYJjaqjY3Yl8A+s61aCpofjt1cAi+w/M3ci3eeZtVK8Gbjx4pRSMXL/3EpqaGrCpk939YGthjCqV9PDP7+PxdL8fnu73w7l12X/spnl1wNP9fvLrUfGwsTBBxNO3yJJIFNIfRGaPr7C2qJFnXuu6JvLz3nf/g7z3Il5BJpOhkU1thfO0tTRR39I033E7OXJaAjXK+S93VdSva4JHT98gK0vxWd/7dyxNbuOj3s97L5cxN/K8Fsp538Ul4sSFO+jY0g5GVSsXuJ729c0AAJEvoguch9QbgxoqdgfO3kXlCjro0aaBQvpAN0e8ihbj6t0XeeZ9/S4RjjY1ofHBbAYnu+w/cq+iEwAAS/44C/fx6xVeXrO3AQDW77kM9/HrEfkytjhvq9zr3LoRklPTcfj0LYX0XYevoLqRSGmqtULeVo0Q8ewtbtz5b9ZMlkSCPUevwd7WDNWNsgdE5XwK/3B2TXpGFu48fIEa1fTzrWNmlgQHTt5A1SoVUaemUWFuj97TuXVjJKem468zis9656ErqG5UBQ75PevWjRHx7C2uv/+ssyQIPXoNDg3qyJ/1+3YdvorMLAm+6lq4cVAXr2d3PZvzWctxoDBRMTt2+SFOXHmEn8e7o3IFHTx+GYu+HRqho7MVvp+7Q94KEzSpJw
a6OaDJoCV4/iY7WFm54wIWjO2GLXMH4bf9V5Galok2Tepi9FctcPJqBMIjsmfJPHz+Dg+fv1O4bu3q+gCAyFdxOH/zySe73/KijbMtWja1xvQlO5CYkoY6NY2w//h1nLl8Dz/7DZKvWzJlwRbsPnwVJ373Q80a2QOLPb50xubQ8xgzayMmDe8Gw6qV8fue84h8/hYbF42UX6NpIws0rl8bQRsOIzUtE80a10Vicho27T6L51GxWOT3tfzcuSv2IEsiwRcNLWBkUBlRb+MRsvss7j56ifmTB+S5jgp9XLvmtmjd1BrTftmBpOTsZ73v+D84ffkelkwbLP/a/jh/C3YevoLTf/wPtf591l91dUZI6DmMnrEBk793h2HVSggJPY/Hz95i8y+jcr3e1gMXYWqsjzbNct/495f1f+FdXCKcG1uierUqECel4vSle9hy4CK6tbVXatkr18p+XKISBjVUIr6ZvgXTvuuAqcPao2plPTx8/g5es7cpbJOgqaHx70DO/34K1+6+hKh3Ynh7tECQb0/o6mjh+et4zN94Cqt2/J3LlehTWjl7GH759SCW/nYoe+n82sZKWxJIpTJIpFKFseI6Qi2E/DwK84P3Yfay3dlL59eriXXzv1dYTVhDQwMbF47E2q0n8dfpm1i37RQq6AlRr051rAscjjbOtvJzrS1qYMu+C9h3/B8kJaehYgUdNK5vht8WjEBrp9z/OFLBrZ7zLRb+egC/rP8LCYkpqGtWHUHThygsiieRSiGRSBUG/+sItfD7L94IWL0XM4J2ITUtEw3qmWLDgu/lqwm/71p4JCKevcVYz87Q0Mg9EG1sUxsbdp7FkXPhiE9Iho5QG/XMq+On0T0xuGfLYr93KrsEMll+c1HUw4YNGxAQEIC2bdsqzEh68eIFOnTooHS+vb09tm3bppAWGRkJf39/XLt2DXp6eujWrRt8fX2hq/vfAlE2Nnn/Ij179iyMjfNenyUvYWFhePIqDl/5Hyt0Xip7Hu2bVtpVoE9Ii4vGlQtRkfegqQE0atTo4ycXUVhYGJ5GJ2PopicqlbPhG3PUqVaxROtaktS+pSY6OhorVqyAoaHymgs5JkyYAGfn/xZ7qlixosJxsVgMT09PmJqaIigoCLGxsQgICEB8fDwWLVokP2/r1q1KZU+ePBl6enpFCmiIiIgKQx3GxahC7YOahQsXon379nj1Ku8l9OvUqQMHB4c8j2/ZsgVisRihoaEwMMjuN9bU1ISvry9GjRol3537wzJevHiBJ0+eYNKkSSrfBxEREeWv1EfSTZkyBe7u7rh06RJ69eoFBwcHeHh4IDxc9aXOr169imPHjmHixIkqlXPmzBm4uLjIAxoAcHNzg1AoxOnTp/PMt3//fggEAri7u6t0fSIiooIo77OfSj2oAbK7iPz9/eHl5YXFixcjLS0NPj4+yMzM3qVZIpEgKysr35fkg7UzJBIJ5syZg5EjR36062fmzJmwtbWFi4sLpk2bhvj4eIXjERER8taYHEKhEGZmZoiIiMiz3AMHDsDJyQk1auS9fgcREVGxKecbWn4W3U8JCQnYvHkzrKysAAA6OjoYNmwYbt68iaZNm2Lo0KG4fPlyvmXUrFkTJ06ckL//448/kJKSgqFDh+aZRygUYuDAgWjVqhVEIhFu3ryJ1atXIzw8HNu3b4e2tjaA7DE1IpFIKb9IJEJCQkKuZd+7dw8PHjzA7NmzP3b7REREqiuGbRLKemDzWQQ1xsbG8oAGgLxV5M2b7DVJZs2aheTkvDfLA7IDlBwxMTEICgrC/PnzFdJzu+7MmTPl75s1awYrKyuMGDECR48eRdeu+W+UJpPJ8vwG2rdvH7S1teHm5pZvGUREROqiIDOFP5TXTGQA0NbWLtRwlM8iqPmwFSSnhSQ9PR1A9kDej808fz+4WLp0KaytrdG0aVOIxWIAkHdTicViVKhQAVpaud96mzZtUKFCBdy+fVse1IhEInk570tMTFTqlgKyg5
2DBw+idevW0NfXz7feRERExUGAYtjQUoW8BZ0p/CFjY2Ol2cMymQzDhw9XmJlcEJ9FUPMxhe1+ioyMxNWrV+Hk5KR0npOTE9auXQtXV9cCX9/S0lJp7ExGRgaePXuGvn2Vd4e9du0aXr16xVlPRET0SZXmYN+CzhT+kFAoVJo9fOnSJSQmJhZ6ok2ZCGoK2/3k5+en1LIyb9486OrqYsKECfkuknfy5EmkpKQoLDzk6uqKVatWIS4uDlWrVgUAHD16FBkZGWjTpo1SGfv27UOFChXQrl07pWNERETqKK+Zwn5+fjh9+nSeQU1u9u/fj0qVKqF9+/aFqkOZCGrq1q1bqPNtbW2V0kQiESpUqKDQlDV//nwIBALY29tDJBLh1q1bCA4ORsOGDdGxY0f5eQMGDMDmzZvh7e0Nb29vxMTEIDAwEN27d1d6SFlZWTh8+DA6duwIPT29Qt4pERFR0RVHS01UVBTGjx+f5/Hjx4/nmh4REaHUe1GQmcIfyszMxJEjR9CpUyfo6OgUOB9QRoKaklK3bl38+eef2Lp1K9LS0lC9enV4eHhg7NixCmNuRCIRNm7cCH9/f4wZMwa6urpwd3eHr6+vUpnnzp1DXFwc16YhIqJPrxRnLxVlpnBuzpw5g/j4+CL9HS31oCYwMFApzcDAAPfv3y/W64SEhCil9evXD/369StQfgsLC6xbt+6j57Vt27bY605ERPSpmJiY5NkaUxT5zRTOzb59+2BkZAQXF5dCX+uzWHyPiIiIVKXaasLZgUfRm3rymymcWwtObpKTk3Hq1Cl8+eWX0NTULHQdGNQQERGpidLcJiG/mcIFHSR89OhRpKamonv37kWqA4MaIiIiUpmrqysuXryIuLg4eVp+M4Vzs3//fpiZmcHe3r5IdWBQQ0REpA4ExdBSo0JjzYABA1C5cmV4e3vj7NmzCA0NxZw5c5RmCvv5+aFBgwZK+WNjY3HhwgV069atyHUo9YHCREREVExKcfZTQWcKS6VSpU2oAeCvv/5CVlZWkbueAAY1REREaqM0VxQGCjZTODAwMNeZz4MGDcKgQYNUuj67n4iIiEgtsKWGiIhITZR2S01pY1BDRESkBkp7l+7PAbufiIiISC2wpYaIiEhNsPuJiIiI1EP5jmnY/URERETqgS01REREaoLdT0RERFT2FcOmlCjjQRG7n4iIiEgtsKWGiIhITZTxhhaVMaghIiJSExxTQ0RERGqhnMc0HFNDRERE6oEtNURERGqAez8xqCEiIlIb7H4iIiIiUgNsqSEiIlIHAkBDQ9XF94qnKqWFQQ0REZGaYPcTERERkRpgSw0REZGa4OJ7REREpBbKeUzD7iciIiJSD2ypISIiUhPsfiIiIqIyjysKM6ghIiJSG+W8oYZjaoiIiEg9sKWGiIhILQiKYUxN2W7qYVBDRESkDgTF0P1UtmMadj8RERFR8YiMjISXlxccHBzg4uICf39/pKWlFShvfHw8Zs6ciVatWqFRo0Zwc3PDli1bCnV9ttQQERGpidKc0i0Wi+Hp6QlTU1MEBQUhNjYWAQEBiI+Px6JFi/LNm5ycjCFDhkBHRwd+fn4wNDTE06dPkZmZWag6MKghIiJSE6U5+2nLli0Qi8UIDQ2FgYEBAEBTUxO+vr4YNWoULC0t88wbHByMtLQ0bN++Hbq6ugAAZ2fnQteB3U9ERESksjNnzsDFxUUe0ACAm5sbhEIhTp8+nW/enTt3wsPDQx7QFBVbaoiIiNREcXQ/RUVFYfz48XkeP378eK7pERER6Nu3r0KaUCiEmZkZIiIi8izv+fPnePfuHUQiEUaMGIHz58+jYsWK6Nq1KyZPnlyoQIctNURERGoge0VhFV8qXF8sFkMkEimli0QiJCQk5Jnv3bt3AIAFCxbAwMAAa9euhY+PD0JDQ+Hv71+oOrClhoiIiORMTEzybI0pCplMlm8LklQqBQBYWloiICAAAODi4oKsrCwsWLAA48aNQ7Vq1Qp0Lb
bUEBERqQmBQKDSSxUikQhisVgpPTExMdcWnBz6+voAgObNmyukN2/eHFKpNN+uqw+xpaYMMDOpiud/TS/tatAnULv1+NKuAn1Cz88uKe0q0CfwKWcklebsJ0tLS6UAJCMjA8+ePVMaa/O+2rVrQ1tbWyldJpMBADQ0Ct7+wpYaIiIiNVGaLTWurq64ePEi4uLi5GlHjx5FRkYG2rRpk2c+oVCIli1b4sKFCwrpFy5cgJaWFurVq1fgOjCoISIiIpUNGDAAlStXhre3N86ePYvQ0FDMmTMH3bt3V1ijxs/PDw0aNFDIO3r0aNy/fx8//vgjzp07hw0bNmDZsmUYNGiQwhTxj2H3ExERkToo5b2fRCIRNm7cCH9/f4wZMwa6urpwd3eHr6+vwnlSqRQSiUQhrXHjxggODsbPP/+MkSNHQl9fH4MHD8a4ceMKVQcGNURERGqiNLdJAAALCwusW7cu33MCAwMRGBiolN6yZUu0bNlSpeuz+4mIiIjUAltqiIiI1EQpN9SUOgY1REREaqK0u59KG7ufiIiISC2wpYaIiEgNZO/9pFpLTVlv52FQQ0REpCbKee8Tu5+IiIhIPbClhoiISC2ovtVBWe+AYlBDRESkDkp5ReHPAYMaIiIiNcEp3URERERqgC01REREaqKcN9QwqCEiIlIXGuU8qmH3ExEREakFttQQERGpgewVhVUvoyxjUENERKQmOPuJiIiISA2wpYaIiEhNaJTvhhoGNUREROqivHc/FSiomTp1aoELFAgEmDdvXpErRERERFQUBQpqLl26VOACy3uUSEREVCq491PBgpoTJ06UdD2IiIhIRYKyHpWoiGNqiIiI1IAAqg8ULushUZGDmrNnz+Ly5cuIi4uDt7c3TE1NcevWLdSqVQsGBgbFWUciIiKijyp0UJOamgpvb29cuHBBPn5m4MCBMDU1xfr162FiYoLJkycXe0WJiIgof+V9XGuhF99bvHgxwsPDsWzZMly9ehUymUx+rGXLlvj777+LtYJERERUMAKBaq+yrtAtNYcOHcK4cePQqVMnSCQShWOmpqaIiooqtsoRERERFVShg5rY2FjUq1cv12MaGhpIS0tTuVJERERUWIJyv6JwobufqlevjgcPHuR67P79+6hVq5bKlSIiIqLCK+/dT4UOajp37ozVq1fjzp078jSBQICXL19iw4YN6NKlS7FWkIiIiMqGyMhIeHl5wcHBAS4uLvD39y9QD86QIUNgY2Oj9IqIiCjU9Qvd/TR69GhcuHAB/fr1g5WVFQQCAaZOnYpnz57BwsIC33//fWGLJCIiomKg+uwn2cdPyYNYLIanpydMTU0RFBSE2NhYBAQEID4+HosWLfpo/iZNmijNni5s70+hg5pKlSphy5Yt2LRpE06dOgUzMzPo6elhxIgR8PT0hK6ubmGLJCIiIhUVRxeSKvm3bNkCsViM0NBQ+Xp1mpqa8PX1xahRo2BpaZlvfpFIBAcHh6JXAEVcfE9XVxfff/89W2WIiIgIAHDmzBm4uLgoLMDr5uYGPz8/nD59+qNBTXEo8orC6enpuH37NuLj46Gvrw87Ozvo6OgUZ92IiIioEDSKofspKioK48ePz/OM48eP55oeERGBvn37KqQJhUKYmZkVaGzM5cuX4eDgAIlEAnt7e4wbNw5OTk6Fqn2RgprffvsNK1euRFJSEmQyGQQCASpWrAhvb298++23RSmSiIiIVFSaE5jEYjFEIpFSukgkQkJCQr55nZyc0LNnT5ibm+Pt27dYt24dhg0bhpCQEDg6Oha4DoUOakJCQjB//ny0bNkS7u7uMDIywrt377Bv3z4sXLgQWlpa+OabbwpbLBEREamoOLZJMDExybM1pihyGj/yM3bsWIX3bdu2hbu7O1auXIm1a9cW+FqFDmo2btyIHj16YMGCBQrpvXv3hq+vLzZt2sSghoiIqJwRiUQQi8VK6YmJiYUeT1OhQgW0adMGhw8fLlS+Qq9T8/btW3
Tv3j3XYz179sTbt28LWyQREREVAw2Bai9VWFpaKo2dycjIwLNnz4o0SPj9vSULqtBBjbm5OWJiYnI9Fh0djTp16hS6EkRERKQaAbK7n1R6qXB9V1dXXLx4EXFxcfK0o0ePIiMjA23atClUWSkpKTh9+jQaNWpUqHyFDmrGjh2LoKAgpa0S7t27h+XLlyv1ixEREZH6GzBgACpXrgxvb2+cPXsWoaGhmDNnDrp3767QUuPn54cGDRrI31+9ehWjRo3Crl27cPHiRezduxeDBg1CdHQ0Ro8eXag6FGhMzciRIxXeSyQS9OrVC/Xq1UO1atUQHR2NR48ewdjYGLt27UKnTp0KVQkiIiJSXWnu3yQSibBx40b4+/tjzJgx0NXVhbu7O3x9fRXOk0qlkEgk8vfVqlVDRkYGfvnlF8THx0NPTw+Ojo6YNWsWGjduXKg6CGQF6LRq3759wQsUCIp11HR5FxYWBqkMqFnXtrSrQp9A7dbjS7sK9Ak9P7uktKtAn8DLx3ehIUChu1IKIywsDNFJGdgQqa1SOUMtMlGtkrBE61qSCtRSc+LEiZKuBxEREZFKiryiMBEREX1eVJ3BVNapFNTExsbmuqW4qampKsWWqClTpiA8PBz79+8v7aoQEREVH0ExLL5XxoOiIgU1K1euREhICOLj43M9fvfuXVXqREREREVQxmMSlRV6SveOHTuwdu1aDBkyBDKZDCNGjMD333+PGjVqoE6dOvD39y+JehIRERHlq9BBzR9//IERI0ZgxIgRAIBOnTrhhx9+wF9//YWKFSsqLLrzObt06RJ69eoFBwcHeHh4IDw8XH5MKpXit99+w5dffomGDRuiZcuWGDt2LBITEwEAy5Ytg6OjI8LDw9GvXz80btwYvXr1Qnh4ONLT0zFjxgw0a9YMrq6u2LBhQyndIRERlScCZO/SrcqrrLf0FDqoefr0Kezt7aGhkZ01MzMTAKCrq4tvv/0W27ZtK94aloDo6Gj4+/vDy8sLixcvRlpaGnx8fOT3MmfOHCxcuBBt27bF6tWrMX36dFSsWBEpKSnyMjIzM+Hn54eBAwdi2bJlkEgkGDNmDPz8/KCrq4vFixejY8eOCAgIwD///FNat0pEROWIQKDaq6wr9JgaLa3sLAKBAJUqVcLr16/lx6pWrYo3b94UX+1KSEJCAjZv3gwrKysAgI6ODoYNG4abN2/C0NAQf/75J3744Qd5axQAuLm5KZSRmZkJX19fuLq6Ashu3Rk5ciQcHBwwdepUAEDz5s1x6NAhHDp0CE2aNPlEd0dERFQ+FTqoqVOnjjyQadSoEbZv344OHTpAQ0MDW7duRc2aNYu9ksXN2NhYHtAAkC/f/ObNGzx8+BAymQweHh75lqGhoYHmzZvL35ubmwMAWrRoIU/T1NSEmZmZQuCnjpJT0rHw1wPYd+IGEhJTYGlmDO9BHdGz48cDuXdxiZi7ci+OX7iD1LQMNKhniknfdUOrptYK5/UbswwXb0Qo5W/TrD42//zfite/rP8Li3/Le1fX5TO+KVC9qHAq6gnxv1Hd0atjE1QVVcDDp2+wZMNR7Dp67aN5W31hhQnD3NDQqib0dIV4+vIdNu35G79uPwOp9L+1QYXaWhjRvw0GujvDzNQQySnpuHX/ORauO4TLtyJL8vbKpc/p5xoAIl9EY8lvh3HxxiPExCejupEInVs1xNhvOqNqlYqq3azaEKg++6mMd0AVOqhxdXXFlStX0Lt3b3z//ff47rvv4OTkBE1NTaSkpGDevHklUc9iJRKJFN5ra2evwJieno74+HhoaWnB0NAw3zJ0dXUhFAqVyqhcubJS2enp6cVR7c/W8GnrcevuM0wZ6Y66tY0RevQafGZtglQmQ+9OX+SZLz0jCwPGr4Q4KRWzxvaGYdXK2LjrLIb4rsYfi73h4lhP4XwzU0Msmz5EIU1USU/h/UB3F7R1Vl59+ccFW/H05Tu0da6vwp1SXjYtGI4mDepg1vI9ePTsLT
y6NMW6ecOgoSHAjsNX88zXppkNdgaNxt/XH2Hc3D+QkpaBL1s3wnzffrCoZYSpP++Un7v0fwPRr4sTFm84gjNXH6CqqALGe3bG/uDx6OL1C/658/RT3Gq58Tn9XMfEJaHnyCWoXEEXvt91Rc3qVRH+4AV+WX8IF64/wsFfJ8qHRJR36tCFpIpCBzU+Pj7yf7u4uODPP//EwYMHIRAI0KZNG4XWi7JIX18fWVlZiImJ+WhgQ8CJC3dw9sp9LJsxBL06Zv+ia9HECi/exGHuyr3o0d4Rmpq5/7LZcuAi7j+OQuiqcfiioUV2Xsd6cBu2EPNW7cW+NRMUztfV0UYTO/N862NirA8TY32FtOdRMXgQ+Rq9O32BKpUrFO1GKU+dWjRA++a2+O5/v2HnkeyWmXPXHqJ2DQPMGtsLu45eU2hxed/X7s7IzJJgwA+rkZKWAQA4ffk+6tWpjq/dm8uDGqG2FjzcmmLH4auYu/q/NaYu3XyMe4fmod+XTRnUFKPP7ef6yLkwxCUkY+VMT3lrT4smVsjIzML8NQdw59ErNLSupeJdkzpQObRt3LgxpkyZgsmTJ5f5gAbIHgcjEAiwc+fOj59MOHTmFirq6cC9rYNC+lddm+HNuwRcz+cPzeEzt2BpZiz/xQcAWlqa6N35C9y4+wxR0fHFUsetBy5BJpNhgHvZ//78HHVrZ4/E5DSEHr+ukP7HvoswNdZH04bmeebNzJIiIzMLqemZCukJSSlIey9NKpVCKpNBnJSqcF5ichokEinS07NUvxGS+9x+rrW0NAEAlSvpKqTntOjoCLk4fg5VZz+VdWyv+4CFhQUGDBiApUuXYuHChTh37hyOHTuGadOmlYlB0J/a/cgo1DOvLv+lk8PW0lR+PM+8j1/Lz8st74NIxbFIT1/GoGFXP5i3nYCW/edg/poDSE3PyLd+UqkU2/+6DPNaRkrN3lQ8bOua4sGT15BIpArptx+9zD6eyzPO8dvOsxBqa2G+rwdqGFWBqJIe+n/pBPe29ggKOSY/L0sixbodZzGgmzO6tmmMyhV1UdvEAEv/9zXESanYGHq+ZG6unPrcfq7dWjdCzepVMWf5HtyPjEJySjou3ojAyt+Po2NLO1iZ1yjSfaojzn4qgG+++abABQoEAmzcuLHIFfocTJ8+HbVq1cL27duxceNG6Ovrw8nJCRUrcjDah+ISUmBmqtxNp/9vN09cQnLeecXJ0BcpdwflpMWJ/8vr1Lguurd3RL061ZGWnomTF+9i9R/HceXWY2wLGp1nf/rpK/fx6m08poxwL9R9UcEZVKmIJ6/eKaXHJaTIj+fl2u2n6OkdhN8CvDD8qzYAgKwsCWav2IsVvytupOv3y06Ik1Kxaf538q6P51Gx6OEdhMgXytenovvcfq5FlfSwZ/V4jPjpN3T8Zr48v3s7ByyZNrhoN0lqqUBBjUyWe3+4queWhsDAQKU0AwMD3L9/X/5eQ0MD3333Hb777rtcyxgzZgzGjBmjkFarVi2FMnKEhISoWOPPX37RvSoj8d9fBurH4d0UjrV3aYBaJgbwX7EHh8+F40vXxrmWsXX/RWhpaqDfl82KXA8qgHx+7vP7nWBfvzZCFgzHtdtPMSFgC5JT0+HqZI3/jXKHjo42Fq07JD934rdu8BncAYFrD+LC9QiIKuniu36u2L3cB318ViDswYtivaXy7nP6uY5PTIGX3zqkpmUgaPoQmBrr4/7jKCzdeATfTlmLjQu+V2pVKo8ExbD3U1lvrSlQUFMe/jBT0VStUgFx4hSl9PjE7LTcPrHJ84oqIj63vOKP5wWAPp2bwn/FHly//STXoCY2PglHz4ejvUsDGBuKcimBikNsQnKuU2qrVsn5ZK78jHMs/PErRMcmYvCkNfLBxOeuPYRUKsOU4V2x/dAVPH0ZA2vz6vAb0Q0zlu3B8s3H5fmPnr+Ni9umYe4PfdBjVFAx31n59bn9XK/6/TjuPHyJC9uno7pRFQCAs70l6tWpjv
7jVmD30Wv84PKv8j6mpLzfP6mofl1TPHryBllZEoX0exHZfe42FiZ55rWxNMG9x6+U0u89/jdv3bzzvi+vwW07D19FRqYEA91dClQOFc2diFewNq+hNBumgWX2mlV3I5SfcY5G1rVw495zpdlR1+88g6amBmz+HSvR0KoWNDQ0lGY4ZUmkCH/4EraWBfteoYL53H6ubz98iRrVqsgDmhz29c0AAPcf5z3Gp7wRCAQqvco6BjWkki6ujZCcmo6Dp28qpO84dBnVjarAsUGdvPO2boRHT9/i+u0n8rSsLAl2HbkKxwZ1UOODX2Af2vHXZQCAYx7TQbccuIjqRlXQrrnyujVUfA6cuonKFXXRo72DQvpA92Z49TYeV8Of5Jn39bsEONqaQUND8ZepU6PsmTOv3sb/e172/zt9MJNKqK0F+/q15edR8fjcfq6rG4kQ9TZeaebUtX+v8eEyDlR+cR4cqaRd8wZo7WQDv593ICk5Hea1jLDn2D84dekegn4aLP/07hv4J3YcuoJzW6ahVg0DAED/bs2xcfc5jJy+AVNGdodR1UrYtPs8Hj97iz8We8uvcelmBJZtOoouro1gZmqE9IzsAYV/7LuAlk2s0KmlnVK9rt9+ggeRr+EzpFOe62lQ8Tj29x2cuHgXP0/uj8oVdfH4eTT6ujVFxxZ2+P6nDfJWmKBpX2NgN2c06T0Tz19nb3y78o+TWDCpH7b8MhK/7TqH1LRMtGlmjdGDOuDkpXsIf5g9g+rCjce4dvsJJg/vCj1dIf6+/giiSnr4/qs2MK9phBHTy/bkhM/N5/Zz7dmnNUKPXsOgH1bBe3AHmBpXxf3HUQjadATVDCrnuxhgeaNR9htbVMKghlS21v9bLFh7AD+v+wvxicmwNKuutB2BRCKFRCJVGDSqI9TCliWjMW/VXkxfshOpaZmwszLFpkUjFKZfGxuKoKmhgaUbj/w760IAi1pGmPjtl/h+QLtcZz5tOXAJAoEAA9ydS/TeKds3P67FNO/umDqiW/Y2CU/ewMvvN4VtEjQ1NbIHc77XxL1222lERcfDe2A7BE37Gro6QjyPisH8tQex6o+T8vNkMhn6jF6OMUM6omcHR/gM7oDklHTcj3yNfuNW4tjfdz7p/ZYHn9PPdWOb2tiz+gcs3XgEC9YeRGx8EqobVUGnlg0xfqgbDPQrfZKvSVlQ3oMagexzn65URImJiViwYAGOHDmCtLQ0NG7cGH5+frC1VeyKuHfvHhYvXoxbt24hIyMDVlZW8Pb2lm9UmSMyMhL+/v64du0a9PT00K1bN/j6+kJX97/FoLKysrB+/Xrs2rULUVFRMDQ0RPv27TF27FilrRkKKiwsDFIZULMuu1DKg9qtx5d2FegTen52SWlXgT6Bl4/vQkOQvV9iSQkLC0NMSib2vlFt1fQe1VNgWEG7ROtaktS2XX7ixIk4duwYJk2ahKVLl0JTUxOenp6IivpvQNm7d+8wdOhQxMbGYs6cOViyZAmqVq2KUaNG4datW/LzxGIxPD09kZycjKCgIEyePBn79u3DtGnTFK65YsUKLF26FL169UJwcDC8vLwQGhqKH3/88ZPdNxERlU8CqD5QuKw39BS5+ykiIgJXrlxBXFwcPDw8UK1aNbx58wZVqlRRaL0oDTdu3MDp06exatUqtG/fHgDg7OyMDh06YN26dfJg5Pz584iLi8P27dtRu3Zt+XktW7bE4cOH0bhx9nTCLVu2QCwWIzQ0FAYG2f3Gmpqa8PX1xahRo+S7fO/fvx/u7u4YOTJ7d9nmzZsjJSUFv/zyC1JSUlChAvcdIiKiklPeu58K3VIjkUjg5+cHd3d3zJw5E0FBQXj79i0AYMaMGQgODi5UeVOmTIG7uzsuXbqEXr16wcHBAR4eHggPDy9s1eTu3LkDgUCAVq1aydP09PTQtGlTnDz5Xz99Vlb2fjHv76wtFAqho6Oj0Ed85swZuLi4yAMaAHBzc4NQKMTp06cVyvtwl26RSA
SZTPbZL0pIRERU1hU6qFm1ahX279+PH3/8Efv371f4Y926dWucPXu20JWIjo6Gv78/vLy8sHjxYqSlpcHHxweZmdkb2kkkEmRlZeX7kkj+W08hIyMDGhoaSgNItbW18fLlS6SlpQEAOnToACMjIwQEBODNmzeIi4vDsmXLkJycjD59+sjzRUREyFtjcgiFQpiZmSEiIkKe1r9/f+zZswd///03kpOTERYWhvXr16N3797cYoGIiEoc934qpN27d8Pb2xvDhg1TCCSA7K0CXrwo/FLlCQkJ2Lx5M6ysrAAAOjo6GDZsGG7evImmTZti6NChuHz5cr5l1KxZEydOZO8VY25uDolEgjt37si7kKRSKcLDwyGTySAWi6Grqwt9fX38/vvvGDFihHxgcOXKlbFq1SrUq/ffKH2xWJzrQF+RSISEhAT5+5EjRyIrKwvffvutPNjr3LkzZs+eXeivCRERUWGpw07bqih0UPPmzRs4ODjkekxHRwfJyXlvdJYXY2NjeUADQN4qkrMr9qxZsz5arlAolP+7ZcuWMDc3x4wZMxAYGAgjIyOsWbMGz58/BwB5C05MTAxGjx6NWrVqwc/PD1paWti1axd8fHywadMmNGjQIN9rymQyhRUYN2/ejA0bNmDKlCmws7NDZGQkli5dimnTpmH+/Pn5lERERESqKnRQY2hoiOfPn6N58+ZKxyIjI1GjRuG3gP+wFURbWxsAkJ6eDgCoU6fOR8ekvB9caGtrY8mSJRg/fjx69OgBALC2toanpydCQkJQpUr2ipa//vorEhISsGvXLujo6AAAWrRogT59+iAoKAirV6+W108sFitdMzExUR6AxcXFYf78+Zg0aZJ8V3MnJycYGBhg9OjR+Oabb2Bnp7xIHBERUXEQQPUpzWW9nafQQU2bNm2wevVquLq6wsjICEB2QJGYmIiQkBC0a9eu2CtZ2O4nALC1tcWhQ4fw9OlTyGQymJubY/bs2bCzs5MHTY8ePULdunXlAU3OvdSvX19hSrelpaXC2Bkge9zOs2fP0LdvXwDA8+fPkZGRobQOTs77Z8+eMaghIqKSUxzjYsp4VFPooGbs2LE4c+YMunbtCmdnZwgEAvzyyy94+PAhtLS04O3t/fFCCqmw3U85BAIBzM3NAQCxsbE4ePAgJk2aJD9uamqK48ePIy0tTT4NXSqV4vbt26hZs6b8PFdXV6xatQpxcXGoWrUqAODo0aPIyMhAmzZt5GUBwO3bt+Hk5CTPmzOL6/3yiIiISkJpj6kpyEK1H3P06FH4+PjAysoK+/fvL9T1Cx3UGBkZYceOHQgKCsLp06ehqamJe/fuoV27dhg7diz09fULW+RH1a1bt9B5Vq1ahTp16sDQ0BCRkZEIDg5Gw4YNFWY19e/fHzt27MDIkSMxZMgQaGlpYefOnbh//z58fX3l5w0YMACbN2+Gt7c3vL29ERMTg8DAQHTv3l3e/WRkZAQ3NzcsXboUWVlZaNiwIR4/foxly5bB0dERDRs2VP0LQURE9JnKWajW1NQUQUFBiI2NRUBAAOLj47Fo0aIClZGWloaAgAB5T1BhFWnxPSMjo89+Ro9YLMb8+fMRExMDY2Nj9OjRA97e3grTvBs0aID169dj+fLl8PPzg0QigaWlJVasWKGwTYJIJMLGjRvh7++PMWPGQFdXF+7u7gqBDwDMmzcPq1atwrZt2xAUFAQjIyN07twZ48aNy3V/IiIiouJUmg01BV2oNj/BwcEwNTVFrVq1irRendru/aQuuPdT+cK9n8oX7v1UPnyqvZ/iUjNxIr7yx0/OR3v9RFTVK9reT4MHD5Yvi5IjIyMDX3zxBX744Qd8++23+eZ/9uwZevTogS1btmDDhg0IDw8v+e6nqVOn5ntcIBBg3rx5hS2WiIiIPgNRUVEYP358nsePHz+ea3pERIR88kyO3BaqzcvcuXPRs2dP1K9fv1D1fV+hg5pLly4ppcXHxyMlJQUikUhpmwAiIiIqeQ
KoPlBYldwFXag2NydOnMD169dx6NAhFWpQhKDm/WnT77tw4QJmzZqFpUuXqlQhIiIiKpriGFNjYmKSZ2tMUXy4UO2H0tPTMW/ePIwZM0Zhj8WiKLbRqy4uLhg8eDDmzp1bXEUSERFRGZHfQrW5teDk2LhxIzQ0NNCtWzeIxWKIxWJkZmZCKpVCLBYjIyOjwHUo0uynvFhaWiIsLKw4iyQiIqIC0ijF2U8FWag2N48fP8bTp0/h4uKidMzJyQkzZ87EwIEDC1SHYg1qrly5Il+cjoiIiD4lAQQqLwlc9PwFWag2N8OHD0fv3r0V0tasWYPIyEgEBATIF9EtiEIHNcuXL1dKy8zMxP3793HmzBl4eXkVtkgiIiIq4wqyUC0A+Pn5ITQ0FHfu3AGQ3cLz4Ro2u3fvxps3b+Ds7FyoOhRLUCMUClGzZk2MHTuWQQ0REVFpEBRD95MK+Qu6UK1UKoVEIlGxornj4nufOS6+V75w8b3yhYvvlQ+favG9hLQs/J2U94DcgmhRSYwqulolWteSVKjZT2lpaZg4cSKuXr1aUvUhIiKiIhIIBCq9yrpCBTW6uro4fvw42LhDREREn5tCr1NTv359PHjwoCTqQkRERCrQEKj2KusKHdT4+vpi3bp1uHz5cknUh4iIiIpIIFDtVdYVaPbTlStX0KBBA1SsWBGzZs1CcnIyPD09IRKJYGxsrHCuQCDA3r17S6SyRERERHkpUFDzzTffYOvWrWjcuDH09fWhr69fwtUiIiKiwlJ1Q8uyrkBBzfsDg0NCQkqsMkRERFQ02bt0q15GWVZsG1oSERERlaZi3fuJiIiISk85730qeFDj6elZoIV5BAIBrl27plKliIiIqPA0ynwHkmoKHNQ0a9YMBgYGJVkXIiIioiIrcFAzevRoNG7cuCTrQkREREVVHGvNlPGGHo6pISIiUhPqsCqwKhjUEBERqYHsKd2qRTVlPSbilG4iIiJSCwVqqbl3715J14OIiIhUxCndREREpBbK+zYJ7H4iIiIitcCWGiIiIjVRzhtqGNQQERGpAwFU734p6zERu5+IiIhILbClhoiISE0UZI9GdcaghoiISE2U75CG3U9ERESkJthSQ0REpA4EAmio3FZTttt6GNQQERGpibIdkqiOQQ0REZGaUHmcsKxYqlFqOKaGiIiI1AJbaoiIiNSEylO62VJDREREpS1nRWFVXqr2XkVGRsLLywsODg5wcXGBv78/0tLSPppv4cKF6NatGxwdHdGkSRP07dsXBw4cKPT12VJDREREKhOLxfD09ISpqSmCgoIQGxuLgIAAxMfHY9GiRfnmTU1NxYABA2BhYQGZTIbDhw9jwoQJkEql6N69e4HrwKCGiIhITZTmisJbtmyBWCxGaGgoDAwMAACamprw9fXFqFGjYGlpmWfe6dOnK7xv3bo1Hj16hN27dxcqqGH3ExERkZoQqPhSxZkzZ+Di4iIPaADAzc0NQqEQp0+fLnR5+vr6yMzMLFQettQQERGRXFRUFMaPH5/n8ePHj+eaHhERgb59+yqkCYVCmJmZISIi4qPXlclkkEgkSElJwYkTJ3D+/HksXLiwUHVnUFMGCARAJV0+qvLg8r7A0q4CfUK1u/N5lwfbpraBuYn+J7lWaXY/icViiEQipXSRSISEhISP5r9w4QKGDRsGANDS0sJPP/2ELl26FKoO/EtJRESkJopjTImJiUmerTFFIZPJChRsNW7cGDt27EBSUhLOnDmDOXPmQFNTE/369SvwtRjUEBERkcpEIhHEYrFSemJiYr6DhHNUqlQJjRo1AgC4uLggIyMDgYGB6NOnDzQ1NQtUBw4UJiIiUgMCZHc/qfRS4fqWlpZKY2cyMjLw7NmzAgU1H7Kzs0NSUhJiY2MLnIdBDRERkZoozdlPrq6uuHjxIuLi4uRpR48eRUZGBtq0aVPo8q5du4ZKlSqhatWqBc7D7iciIiI1UYrjhDFgwABs3rwZ3t7e8Pb2RkxMDA
IDA9G9e3eFlho/Pz+Ehobizp07AIB79+5h0aJF6NKlC2rWrImUlBScPHkSO3bswMSJE6GlVfBQhUENERERqUwkEmHjxo3w9/fHmDFjoKurC3d3d/j6+iqcJ5VKIZFI5O+NjIwgEomwcuVKREdHo3Llyqhbty5WrFiBjh07FqoOAplMVsa3r1JvYWFhkAGwtm1U2lWhT+Dh66TSrgJ9Qs2GLCntKtAnkDOlO2cQbEkICwtDSoYEb4UmKpVjnBGFCkLNEq1rSWJLDRERkZooze6nzwEHChMREZFaYEsNERGRmlBtUnbZx6CGiIhIHQiKofupjMdE7H4iIiIitcCWGiIiIjUgAKChYlNLGW+oYVBDRESkLjj7iYiIiEgNsKWGiIhITZT3lhoGNURERGpBUAxTust2VMSghoiISE1olO2YRGUcU0NERERqgS01REREakAA1VcULusNPQxqiIiI1ER5HyjM7iciIiJSC2ypISIiUhPc0JKIiIjUAmc/EREREakBttQQERGpCXY/ERERkVrg7CciIiIiNcCWGiIiIjVRzhtqGNQQERGpAwEADRX7n8p6UMSghoiISE2U9aBEVRxTQ0RERGqBLTVERETqQADVm2rKeFMPgxoiIiI1Ud7XqWH3ExEREakFttQQERGpifK++B6DGiIiIjVR2jFNZGQk/P39ce3aNejp6aFbt27w9fWFrq5unnmSkpLw22+/4cyZM4iMjISWlhbs7OwwYcIE2NnZFer67H4iIiIilYnFYnh6eiI5ORlBQUGYPHky9u3bh2nTpuWb79WrV9i6dStatGiBxYsXIyAgAFKpFAMGDMDt27cLVQe21BAREamLUmyq2bJlC8RiMUJDQ2FgYAAA0NTUhK+vL0aNGgVLS8tc89WqVQtHjx6Fnp6ePK1Fixbo0KEDNm/ejICAgALXgS01REREakKg4n+qOHPmDFxcXOQBDQC4ublBKBTi9OnTeearUKGCQkADADo6OrC0tMTbt28LVQe21BAREZFcVFQUxo8fn+fx48eP55oeERGBvn37KqQJhUKYmZkhIiKiUHVISUnB3bt30bNnz0LlY1BDRESkBgTy/ykdYrEYIpFIKV0kEiEhIaFQZS1ZsgSpqakYPHhwofIxqCEiIlITqsY0MgAmJiZ5tsYUqUyZDIJCzDXft28fNm7ciOnTp6NOnTqFuhbH1BAREakLgYovFYhEIojFYqX0xMTEXFtwcnP+/HlMnToVXl5eGDRoUKHrwKCGiIiIVGZpaak0diYjIwPPnj3Lc+bT+27dugUfHx906dIFkyZNKlIdGNQQERGpBVXnPqnWXOPq6oqLFy8iLi5Onnb06FFkZGSgTZs2+eaNiIjA8OHD0aRJEwQEBBSqu+p9DGqIiIjUhECg2ksVAwYMQOXKleHt7Y2zZ88iNDQUc+bMQffu3RVaavz8/NCgQQP5+5iYGHh5eUFbWxvfffcdbt++jRs3buDGjRu4c+dOoerAgcJERESkMpFIhI0bN8Lf3x9jxoyBrq4u3N3d4evrq3CeVCqFRCKRv3/06BGioqIAAEOHDlU4t2bNmjhx4kSB68Cg5l+xsbFwcXFBQEAA+vTpU9rVISIiKrTS3vvJwsIC69aty/ecwMBABAYGyt87Ozvj/v37xXJ9BjVERETqorSjmlLGMTVERESkFsptULNt2za0b98e9vb28PT0xLNnz5TOCQ0NRa9evdCoUSM4Oztj+PDhePnyJQBg165dsLGxwa1bt+Dp6Ql7e3u4ubnh7NmzkEqlWLJkCVq2bAkXFxf8/PPPkEqln/oWiYionCnNvZ8+B+Wy++nkyZP46aef0KdPH3Tt2hXh4eGYMGGCwjm//vorFi5cCA8PD/zwww/IzMzExYsXERsbi5o1a8rPmzJlCgYOHIjvvvsOa9aswdixY9GnTx8kJSUhMDAQN2/exLJly2BtbY3u3bt/6lslIqLyohhmMJX1uKZcBjWrVq1C06ZN5duZt27dGq
mpqQgODgaQvfrh8uXL0b9/f8yePVuer2PHjkplDRkyBAMHDgQAVK9eHd27d0dYWBi2bdsmL/vEiRM4dOgQgxoiIqISVO6CGolEgtu3byutVujm5iYPaq5fv47U1FR4eHh8tLwWLVrI/21ubg4AcHFxUTjHwsICkZGRKtb885CUko65q/Yh9Ng/iBOnwKpOdYwf2gl9Ozf9aN7o2ETMCArF4XPhSE3LQEPrmvjfyO5o08xG6dxTl+5hXvB+hD94CT1dIdxaNcSssb1QzaBynuWfunQPvX2WAwAeHQ2EoX4l+bHANQcwf+1fSnl0hFp4fX5JAe6c8pOSmo7Vm4/g2LlbECemok6tavD0aIvOrvb55nvzLgG/7z6D+xGv8PBJFJKS0zB9nAfcOyp+P716E4te3y3Is5zmTawRNOvbYrkXyl9FXW38b1hb9GrTAFUr6+Hh83dYsuVv7Dr18fVEWtnXwYSBLdGwbnXo6WrjaVQcNv11A7/uvQqpVCY/b9qwtujUrB5qG1eBnq42Xsck4tQ/kfjlj/N4/rZwGyOWN2W8oUVl5S6oiY2NRVZWFgwMDBTSjYyM5P+Oj48HABgbG3+0vMqV//sjKxQKAUBpjwttbW1kZGQUtcqflW9+XIt/7jzFDJ+eqGdmjB2HruK7/22AVCpDvy5OeeZLz8hET+8gJCSmImCiB6oZVMKv28/CY+wKhK4Yg5ZfWMnPPX/tIfqNW4nOrRri95+7ITo2CbOW70FP7yCc3PQjdITaSuUnpaRj3Lw/YVKtCqKi8/6ltyPIG6JKevL3Ghrl/VdA8Zg8bzPuPHyO0Z5fwqymEQ6fvoFpC/+EVCpDl7YOeeZ7EfUOh07dgHVdE7T4wgZHztzM9TwjAxHWLfRWSj998TY27TyNts3tiutW6CM2zeyHJtYmmLXuJB69iIFH+4ZY978+0BAIsOPk7TzztXG0wM6Agfg77BnGLT6AlLRMfOlihfmj3WBhWhVTVx6Rn1ulki52nryNB8/eITElA/XrGGHioFb40sUaLt8FIy4x9VPcatlUzn+llbugxsDAAFpaWoiNjVVIf/funfzf+vr6AIC3b9+iRo0an7J6n7Uj52/j5KV7WOs/FB5u2Z+kWze1xvPXsZgRFIo+nb6ApmbuY89D9lzA3YgoHF43Ac0a183O+4U1Wn8dgBnLQnFsw38tZ9ODQlHPzBgbA72gpaUJAKhjaogu3/2CzXsvwsujtVL5s5bvgX5lPXRu2RCL1h/K8x4cbM0UWnBIdeev3sOlGw8xx3cA3No4AACaNrbE67fxWPbbQXRq3TjP7wtHOwsc+f0nAMCdhy/yDGqE2lpoVN9MKX3lpkPQ1dFG5zb5twhR8ejUzBLtv6iL7+btxs5/A5hzN5+idvUqmPV9B+w6fUehxeV9X3dujEyJFAN+2oqUtEwAwOnrkahX2xBfd26sENRMWqb4M3z+1lM8fR2P7fMGomsLa/x+OPfvk/Iue5MD1aKash4TlbvZT5qammjQoAGOHj2qkH748GH5vx0dHaGnp4edO3d+6up91g6cvIlKFXTQq4OjQvrX3ZsjKjoBV8Of5J331E1Y1akuD2gAQEtLE/2+dMK120/x6m08AODV23j8c+cpvuraTB7QAICzfV3UMzPGgVPKv8z+vv4IG3efR9C0QdDULOs/kmXPqQu3UUFPiA6tGimku3f8AtGxYtx+8DzPvBoaRf8V9CIqBv+ER6Jjq8aoVEG3yOVQwXVrWR+JKekIPa3Y1fTH4ZswNRKhaf2aeeQEMiVSZGRKkJqeqZCekJSGtIysj177XUIKACBLwpmklLdyF9QAwMiRI3H16lVMnToVZ8+exapVq7B//3758cqVK2P06NHYsmULfvrpJ5w+fRonT55EYGAgwsLCSrHmpevu41ewNq+hEGwAgF297F9kdyNe5Z03Igp2VqZK6XZW2XnvPY5SKCMn/cNzP7xGaloGxvr/jpED28G+fu2P3kPLAfNg6DwG1m
5TMXLGJjx/HfvRPJS/iKdvYF7LGFqait8X9cxN/j3+ukSuu/foVchkMvTsnHe3JxUvW/NqePDsHSQftMbcfvxGfjwvv+2/BqG2JuaPdkMNw0oQVdRB/46N4N6yPoK2Xcg1j6aGALpCLTSyrI6AUZ3x8HkM9p+7V3w3pIZKc++nz0G5634CgA4dOmDWrFlYvXo1Dhw4AHt7e/z8888YMGCA/Jzhw4fDwMAAGzZswO7du1GxYkU4OjrC0NCwFGteumITkmFuaqSUXrVKBfnx/PLqiyoo5xUp5s35/6q5nKsvqoDYfz+t5Zi3ej8kEhmmft8137qb16qGad7d0di6FnR1tHHt9lMEhRzFyYt3cTJkMkyN9fPNT3lLSExBzRoGSulVKuvJjxc3iUSKgyeuwbxWNdg3MC/28il3BiI9PImKV0qPS0yTH8/LtXuv0HPSZvz2Ux8M75kdiGZJpJi97gRW7LikdL5x1Yq4v+0H+fsrd1+gx6QQJKdlKp1L/1GDuEQl5TKoAbJ3E30/iAGgtPdE37590bdv31zz9+nTJ9c9onLbv+L9PS7KvHxC+Y9tFZ/f8Q8P5XXu+8nXbj/Bqi2nsGOpN/R0hflee0DXZgrvWze1RuumVuj87c8I2nQMgb4fn+lGecvvyZfEgl4X/nmAtzFijB2WfzBLJUCW+5iZjxyCvVUNhMz0wLV7rzBhyUEkp2XC1cEc/xvaFjpCLSz6/ZzC+TEJKWg3eh10tDVhbWaEsV+5YO/CIejuG4I3sUnFdTekZsptUEOFZ1ClIuJyaY2J+7f1JLfWlY/mFefkrSg/D8i91SdenKJwDZ/Zv6N7O3s4NjCTtwakpWf3zScmpUGorYXKFfMea/GFnTnqmRnjarh6TLcvLVUqV8i1NSbh3xkqosp5f3ovqr1Hr0BLSxNd2zcp9rIpb7HiVFTNpTWmauXsn7P8ZiUtHNMF0XHJGDxzu3ww8bmbTyGVyTBliCu2Hw/H09fx8vMlUhluPMjulr50+wWOX4nAjRAfjB/QQmFQMX2gnDfVMKihAmtgaYqdR64hK0uiMK7mTkT21hG2lspjZuR565niziPlMTc5abaWJgpl3Hn0Cp1b2imd+/417j2Owr3HUQg9dl2pXMfeM9HQqibO/jE133uSyTitW1X1zGvgyJkbyJJIFMbV5IylsaxTvDMIY+OTcO7KPbg2s4UBZ7J9Unci36JvOztoaggUxtU0sMhe/uLuk+g88zayrIGdJ28rzY66fv8VNDU1YGNmpBDUfOjVu0S8jklCvZrKXZ30H3XY6kAV5XKgMBVNt7b2SEpJx94TNxTS/9x/GSbVqqBpQ/N88z548kZhhlRWlgTb/rqCpg3NYVJNHwBgaqyPL+zqYNtflyF5b5bDlbBIPHz6Bu7t/pu6u2/1WKXXwG7OAIDfF32PoGlf53s/V8IiEfH8LZo2tCjYF4By1ba5HVJSM3Dy73CF9APHr6GagQh21h8fwF0YB0/8g6wsCXp04gDhT+3A+fuoXEEHPVrbKqQP7NQYr96JcfXeyzzzvo5JhKO1idKHCKcGtQAAr96J8722hWlVmBpVxuNXcUWsPZUHbKmhAuvU0g7tnOtj4vytSExOQ93a1bDz8FUcv3AHwbM95WuRjJnzO/48cAn/7J4JM5PsT1WDezTHr9vPYOiUdZjh0wPVqlbGuh1n8ejpG4SuGKNwnZljeqL36OUYOmUdvDxaIzouEbOW74WtpQkGdW8uP6/VF9ZKdTx37SGA7Cng769H0+rrAHz1pROszatDV6iNa3eeYlnIMVQ3FGHsN8rbX1DBtWhqA2cHK8xfGYrklHTUMjHEkTM3ceGfB5g9sb/8+2JO0A4cPP4Pdq2dBBPjqvL8x89nzyh8+e9MtLuPXkJPTwcA0KFlI3xo79ErqG5UBc2bWCkdo5J17EoETlx7jJ/HfonKFXTw+FUs+razQ8dm9fB9QKi8FSZogjsGdm6MJt+skK8AvHLnJSzw6YItc/
rjt/3/IDU9E20czTHaozlOXnuM8MdvAQB2FsaYO6oT9p65iyev4yGVytDAwhjefZwRK07F8u0XS+3+ywJ1mMGkCgY1VCibFgyH/8p9CAg+kL1Ngnl1/Dp3qMI2CRKJNLuV5b1RgzpCbexZOQYzloVi8qLtSE3LRCPrmti+1FthNWEgO1jZtnQUAoIPYODEYOjpasOtVUPMHts719WEC8LGogY27j6PN+8SkJEpQY1qVdCn8xf48bsvUcOoStG+GCQ3328wVoUcRvDvRyFOTEGdWtXgP2mgwjYJUokUEqkUsg9Gk04N/F3h/fYDF7D9QPYU38v7FAfZ37r7FE9eROO7AR1UWuOGiu6bmdsx7dt2mOrp+u82CTHwmrtLYZsETU0BtDQ1FMZ3rN1zFVExifDu44ygCd2gq6ON56/jMT/kLFbt+m/209u4ZLyOScJoj+aoblAJWpoaePUuEYcvPcQvf57Hy+j8W3TKu3Ie00Ag+/A3jBp4+/YtNmzYgPPnz+PZs2eoWLEimjRpgokTJ6JOnToK50ZERCAwMBBXrlyBtrY22rZti6lTpypso/D06VOsW7cON2/exMOHD1G3bl2FdW1yrF+/Hnv37sWLFy+QlZWF2rVro3///hg0aNBHZwblJSwsDDIA1rbKn1hJ/Tx8zVkd5UmzIUtKuwr0CWyb2gbmJvpo1Kjkfo+HhYUhUyJDpRr1VCon6fUjaGsKSrSuJUktW2pu376NI0eOoG/fvnBwcIBYLEZwcDD69euHvXv3yrc+SEpKgqenJ4yNjbFo0SKkpaXhl19+wYgRI7B161b5J8GHDx/i9OnTsLe3hzSXT5o5EhMT4e7uDisrK2hra+PChQvw9/dHUlISRo4c+cnun4iIyqly3lSjlkHNF198gUOHDkFL67/bc3JygqurK3bs2AEfHx8AwB9//IHExETs2bNHvqhenTp14OHhgePHj6NTp04AgPbt26Njx+xxF1OmTEF4eDhyM27cOIX3LVq0wKtXr7B7924GNUREVOI4+6mUTZkyBe7u7rh06RJ69eoFBwcHeHh45Bk4FIRIJFIIaIDsjSxr1KiBt2/fytPu3LkDW1tbhVWCGzVqBH19fZw4cUKepkrffdWqVZGZyRUwiYio5JX3bRJKPagBgOjoaPj7+8PLywuLFy9GWloafHx85MGARCJBVlZWvi+JRJLvNaKiovDq1SvUrfvfhorp6enQ1lYeeCoUCvH48eMi309WVhaSk5Nx6tQphIaG4ptvvilyWURERFQwn0X3U0JCAjZv3gwrq+xZMDo6Ohg2bBhu3ryJpk2bYujQobh8+XK+ZdSsWVOhdeVD/v7+EIlE6N27tzzN3Nwcu3btQlpaGnR1s1fEfPXqFaKjo1GhQt6r4+bn6dOn6Ny5s/z9qFGjMHTo0CKVRUREVFACqD6kpqw31nwWQY2xsbE8oAEAS0tLAMCbN9k7v86aNQvJyXlvlghkt67kJTg4GCdOnMCKFStQpcp/03f79++PkJAQTJ8+HRMnTkR6ejqmTZsGDQ2NInc5mZiYYMeOHUhJScGVK1ewdu1aaGhoYOzYsUUqj4iIqMDKelSios8iqBGJRArvc7qE0tPTAWQP3v3YzPO8pkzv3r0bixcvxk8//YT27dsrHDM3N8e8efPg7++PPXv2AAA6d+4MV1fXjwZReREKhfKpcM7OzqhQoQIWLVqEgQMHolq1akUqk4iIiD7uswhqPqao3U/Hjx/HtGnTMGLECAwaNCjXfD169ECXLl3w5MkTVKlSBdWrV0e3bt2UAqCisrOzg0QiwcuXLxnUEBFRiSrvs5/KRFBTlO6ny5cv44cffkDPnj3xww8/fDSvtXX2kvsXLlzAkydPFMbeqOLatWsQCASoVatWsZRHRESUq+KYwVTGY6IyEdS8P2OpICIiIuDt7Y1atWqhb9++uHHjhvxYpUqVUK9e9oqLKSkpWLZsGZycnKCjo4MbN25gzZo18PHxUbhmamoqTp
8+DQB4+fIlkpKScOjQIQBAs2bNYGBggMTERAwfPhw9evRAnTp1kJWVhYsXLyIkJAT9+/eHkZGRil8FIiIiyk+ZCGoK6+bNm0hMTERiYiK+/lpxp+ZmzZohJCQEQPb6Mw8ePMCuXbuQkpKCunXrYsaMGejTp49CnpiYGKWF9XLeb9q0Cc7OztDR0YGFhQU2bNiAN2/eQFdXF2ZmZpg1axZ69epVcjdLRET0r9JuaImMjIS/vz+uXbsGPT09dOvWDb6+vvIZxnk5ePAg/vrrL9y4cQNv377Fjz/+CC8vr0Jfv9SDmsDAQKU0AwMD3L9/v8hl9unTRykwyY2uri7WrVv30fNq1ar10foIhUIEBAQUuI5ERETFrhSjGrFYDE9PT5iamiIoKAixsbEICAhAfHw8Fi1alG/eQ4cO4fnz52jXrh22bt1a5DqUelBDREREZd+WLVsgFosRGhoq3xRaU1MTvr6+GDVqlHy5ltwsWbJEvpSKKkHNZ7GiMBEREalOoOJ/qjhz5gxcXFzkAQ0AuLm5QSgUysel5kWV7YgUyimWUoiIiKjUlebeTxEREUqtMUKhEGZmZoiIiFCt8AJi9xMREZGaKI4hNVFRURg/fnyex48fP55rulgsVlpMF8heYDchIaEYavZxbKkhIiKiEiOTyfJc9b+4saWGiIhIDQigeheSANl7GObVGpMfkUgEsVislJ6YmJjvIOHixJYaIiIitSFQ8VV0lpaWSmNnMjIy8OzZMwY1REREVHa4urri4sWLiIuLk6cdPXoUGRkZaNOmzSepA4MaIiIiNVGas58GDBiAypUrw9vbG2fPnkVoaCjmzJmD7t27K7TU+Pn5oUGDBgp5Hz16hEOHDsm3IHrw4AEOHTr00angH+KYGiIiIjVRmtskiEQibNy4Ef7+/hgzZgx0dXXh7u4OX19fhfOkUikkEolC2l9//YXly5fL34eGhiI0NBQ1a9bEiRMnClwHgUwmk6l2G1SSwsLCIANgbduotKtCn8DD10mlXQX6hJoNWVLaVaBPYNvUNjA30UejRiX3ezwsLAwSqQzGdeqrVM7bp/egqSEo0bqWJLbUEBERqYlPNHP6s8WghoiISC2ovtVB6e/zrRoOFCYiIiK1wJYaIiIidVG2G1pUxqCGiIhITZTzmIZBDRERkVoohrVmynpUxDE1REREpBbYUkNERKQmVJ/9VLYxqCEiIlIX5TumYfcTERERqQe21BAREakBAVRvqCnrDT0MaoiIiNREed8mgd1PREREpBbYUkNERKQmOPuJiIiI1AK7n4iIiIjUAIMaIiIiUgvsfiIiIlIT5b37iUENERGRmijvA4XZ/URERERqgS01REREaoLdT0RERFTmcZsEdj8RERGRmmBLDRERkboo600tKmJQQ0REpCY4+4mIiIhIDbClhoiISE1w9hMRERGphXIe07D7iYiIiNQDW2qIiIjUAReqYVBDRESkLsr77CcGNURERGqCA4Xps5aZmQmZTIYHd8NKuyr0CWRKZKVdBfqEtk1tU9pVoE/AWF8XmZmZJX6dzIwMlf9WZGZkQCgUFlONPj0GNZ85wb9hdzkPvssNoSafdHlibqJf2lWgTyAzM1P+u7ykFFcgIhQKy3RQI5DJZPxoSERERGUep3QTERGRWmBQQ0RERGqBQQ0RERGpBQY1REREpBYY1BAREZFaYFBDREREaoFBDREREakFBjVERESkFhjUEBERkVpgUENERERqgUENERERqQUGNURERKQWGNRQmRAfH4/Ro0fDyckJNjY2OHbsWGlXiT4wZcoUuLu7l3Y1qBTFxsbCxsYGu3btKu2qUDmlVdoVICqIdevW4dKlS5g/fz4MDQ1hYWFR2lUiIqLPDIMaKhMiIiJgY2ODDh06lHZViIjoM8XuJyp1Od0Wly5dQq9eveDg4AAPDw+Eh4cDAGxsbHD8+HFcvXoVNjY2sLGxKeUaU37yeo4AIJVK8dtvv+HLL79Ew4YN0bJlS4
wdOxaJiYkAgGXLlsHR0RHh4eHo168fGjdujF69eiE8PBzp6emYMWMGmjVrBldXV2zYsKGU7pBybNu2De3bt4e9vT08PT3x7NkzpXNCQ0PRq1cvNGrUCM7Ozhg+fDhevnwJANi1axdsbGxw69YteHp6wt7eHm5ubjh79iykUimWLFmCli1bwsXFBT///DOkUumnvkUqYxjU0GchOjoa/v7+8PLywuLFi5GWlgYfHx9kZmZi69ataNKkCRo0aICtW7di69atpV1dykN+zxEA5syZg4ULF6Jt27ZYvXo1pk+fjooVKyIlJUVeRmZmJvz8/DBw4EAsW7YMEokEY8aMgZ+fH3R1dbF48WJ07NgRAQEB+Oeff0rrVsu9kydP4qeffoKzszOWL1+O5s2bY8KECQrn/Prrr5g8eTLs7OywfPlyzJ07F3Xq1EFsbKzCeVOmTEHHjh2xfPlyGBsbY+zYsZg7dy6ioqIQGBiIQYMGYc2aNThw4MCnvEUqg9j9RJ+FhIQEbN68GVZWVgAAHR0dDBs2DDdv3kTTpk0hEomgpaUFBweH0q0o5Su/52hoaIg///wTP/zwA0aMGCHP4+bmplBGZmYmfH194erqCiC7dWfkyJFwcHDA1KlTAQDNmzfHoUOHcOjQITRp0uQT3R29b9WqVWjatCkCAgIAAK1bt0ZqaiqCg4MBAImJiVi+fDn69++P2bNny/N17NhRqawhQ4Zg4MCBAIDq1auje/fuCAsLw7Zt2+RlnzhxAocOHUL37t1L+taoDGNLDX0WjI2N5X8IAcDS0hIA8ObNm9KqEhVBfs/x4sWLkMlk8PDwyLcMDQ0NNG/eXP7e3NwcANCiRQt5mqamJszMzPD69etirD0VlEQiwe3bt9GpUyeF9PcD1OvXryM1NfWjzxtQfLY5z9vFxUXhHAsLC0RFRalQayoPGNTQZ0EkEim819bWBgCkp6eXRnWoiPJ7jvHx8dDS0oKhoWG+Zejq6kIoFCqVUblyZaWy+f1ROmJjY5GVlQUDAwOFdCMjI/m/4+PjAWQHuh/z/rPNefa5fS9lZGQUtcpUTjCoIaJPQl9fH1lZWYiJiSntqpCKDAwMoKWlpTQ25t27d/J/6+vrAwDevn37KatG5RyDGiL6JJo3bw6BQICdO3eWdlVIRZqammjQoAGOHj2qkH748GH5vx0dHaGnp8fnTZ8UBwoT0SdhYWGBAQMGYOnSpUhISICLiwvS0tJw6tQpjBkzBtWrVy/tKlIhjBw5Et7e3pg6dSq6du2K8PBw7N+/X368cuXKGD16NBYtWgSpVIqOHTtCKpXi0qVL6NatGxo1alSKtSd1xaCGiD6Z6dOno1atWti+fTs2btwIfX19ODk5oWLFiqVdNSqkDh06YNasWVi9ejUOHDgAe3t7/PzzzxgwYID8nOHDh8PAwAAbNmzA7t27UbFiRTg6On50XBVRUQlkMpmstCtBREREpCqOqSEiIiK1wKCGiIiI1AKDGiIiIlILDGqIiIhILXD2E30WNmzYgICAALRt21a+dwwAvHjxAh06dFA6397eXr4vTI7IyEj4+/vj2rVr0NPTQ7du3eDr6wtdXV35Ofnt8H327NkCrX5KRZOYmIgFCxbgyJEjSEtLQ+PGjeHn5wdbW1uF8+7du4fFixfj1q1byMjIgJWVFby9veV7QeUoyPPOysrC+vXrsWvXLkRFRcHQ0BDt27fH2LFjlVaspeLx9u1bbNiwAefPn8ezZ89QsWJFNGnSBBMnTkSdOnUUzo2IiEBgYCCuXLkCbW1ttG3bFlOnTlVYqfjp06dYt24dbt68iYcPH6Ju3boKU8dzrF+/Hnv37sWLFy+QlZWF2rVro3///hg0aBAEAkGJ3zd9HhjUUKmLjo7GihUr8p3mOWHCBDg7O8vffzgFWCwWw9PTE6ampggKCkJsbCwCAgIQHx+PRYsWyc/LbYfvyZMnQ09PjwFNCZs4cSLCwsIwadIkGBkZYcOGDfD09MSePX
tgYmICIHtF2qFDh6J27dqYM2cOdHR08Mcff2DUqFH4888/0bhxYwAFf94rVqzAmjVrMGbMGDg4OCAiIgKLFy/GixcvsHr16lL5Oqi727dv48iRI+jbty8cHBwgFosRHByMfv36Ye/evahRowYAICkpCZ6enjA2NsaiRYuQlpaGX375BSNGjMDWrVuhoZHdkfDw4UOcPn0a9vb2kEqlyGvCbmJiItzd3WFlZQVtbW1cuHAB/v7+SEpKwsiRIz/Z/VMpkxGVskmTJsl+/PFH2eDBg2Xff/+9wrHnz5/LrK2tZX/99Ve+ZQQHB8vs7e1lMTEx8rS9e/fKrK2tZY8ePcozX075a9euVe0mKF/Xr1+XWVtby44fPy5PS0lJkbm4uMjmzJkjTwsNDZVZW1vLnj17Jk9LT0+XNW3aVLZgwQJ5WkGfd8eOHWU//vijQl3WrFkjq1+/viw5OblY75GyJSQkyDIzMxXSYmJiZHZ2drJly5bJ04KDg2WNGzeWvXv3Tp5269YtmbW1tezIkSPyNIlEIv/35MmTZd26dStwXSZMmCDr3LlzUW6DyiiOqaGPmjJlCtzd3XHp0iX06tULDg4O8PDwQHh4uMplX716FceOHcPEiRNVKufMmTNwcXFRaLZ2c3ODUCjE6dOn88y3f/9+CAQCuLu7q3R9dVISz/vOnTsQCARo1aqVPE1PTw9NmzbFyZMn5WlZWVkAlDc41NHRUfiEXtDnnZWVpbQRpkgkgkwmy/MTf3lSEs9aJBJBS0uxE8DAwAA1atRQ2Afqzp07sLW1VWihbdSoEfT19XHixAl5Wk6LTVFUrVoVmZmZRc5PZQ+DGiqQ6Oho+Pv7w8vLC4sXL0ZaWhp8fHzkvzAkEgmysrLyfUkkEoUyJRIJ5syZg5EjR36062fmzJmwtbWFi4sLpk2bJt8BOEdERAQsLS0V0oRCIczMzBAREZFnuQcOHICTk5O8SZyyFffzzsjIgIaGhtIfKG1tbbx8+RJpaWkAslepNTIyQkBAAN68eYO4uDgsW7YMycnJ6NOnjzxfQZ93//79sWfPHvz9999ITk5GWFgY1q9fj969e3MV43+VxM/2h6KiovDq1SvUrVtXnpaeni7fgf19QqEQjx8/LvL9ZGVlITk5GadOnUJoaCi++eabIpdFZQ/H1FCBJCQkYPPmzbCysgIA6OjoYNiwYbh58yaaNm2KoUOH4vLly/mWUbNmTYVPYH/88QdSUlIwdOjQPPMIhUIMHDgQrVq1gkgkws2bN7F69WqEh4dj+/bt8l+KYrE414GfIpEICQkJuZZ97949PHjwALNnz/7Y7Zc7xf28zc3NIZFIcOfOHfm4GKlUivDwcMhkMojFYujq6kJfXx+///47RowYIR8YXLlyZaxatQr16tWTl13Q5z1y5EhkZWXh22+/lbfMdO7cmc/8PSXxs/0hf39/iEQi9O7dW55mbm6OXbt2IS0tTT64+9WrV4iOjkaFChWKdC9Pnz5F586d5e9HjRqV7+8XUj8MaqhAjI2N5b/0AMg/Jb958wYAMGvWLCQnJ+dbhlAolP87JiYGQUFBmD9/vkJ6btedOXOm/H2zZs1gZWWFESNG4OjRo+jatWu+15TJZHnOfNi3bx+0tbXh5uaWbxnlUXE/75YtW8Lc3BwzZsxAYGAgjIyMsGbNGjx//hzAf10MMTExGD16NGrVqgU/Pz9oaWlh165d8PHxwaZNm9CgQYN8r/nh8968eTM2bNiAKVOmwM7ODpGRkVi6dCmmTZuG+fPnF+Iror6K+1l/KDg4GCdOnMCKFStQpUoVeXr//v0REhKC6dOnY+LEiUhPT8e0adNybdErKBMTE+zYsQMpKSm4cuUK1q5dCw0NDYwdO7ZI5VHZw6CGCuTDT8U5LSTp6ekAgDp16nx0jML7f2yWLl0Ka2trNG3aFGKxGADkTdlisRgVKlRQ6pfP0aZNG1SoUAG3b9+WBzUikUhezvsSExOVuimA7D9+Bw8eRO
vWraGvr59vvcuj4n7e2traWLJkCcaPH48ePXoAAKytreHp6YmQkBD5H7tff/0VCQkJ2LVrF3R0dAAALVq0QJ8+fRAUFCSfsVSQ5x0XF4f58+dj0qRJ8i4IJycnGBgYYPTo0fjmm29gZ2dXuC+MGiruZ/2+3bt3Y/Hixfjpp5/Qvn17hWPm5uaYN28e/P39sWfPHgDZrWiurq4fDaLyIhQK5bt/Ozs7o0KFCli0aBEGDhyIatWqFalMKlsY1FCxKGwTdWRkJK5evQonJyel85ycnLB27VqldUnyY2lpqTR2JiMjA8+ePUPfvn2Vzr927RpevXqFSZMmFfga9J+idEnY2tri0KFDePr0KWQyGczNzTF79mzY2dnJ/5A+evQIdevWlQc0QPYfzPr16+PWrVvytII87+fPnyMjI0NpHZyc98+ePWNQUwBF7X46fvw4pk2bhhEjRmDQoEG55uvRowe6dOmCJ0+eoEqVKqhevTq6deumFAAVlZ2dHSQSCV6+fMmgppxgUEPForBN1H5+fkqftOfNmwddXV1MmDAh30XyTp48iZSUFPknMgBwdXXFqlWrEBcXh6pVqwIAjh49ioyMDLRp00apjH379qFChQpo165dge6PFBW1S0IgEMDc3BwAEBsbi4MHDyoElqampjh+/LjCOAupVIrbt2+jZs2a8vMK8rxNTU0BZK+b8n7wnDOz5/3yKG9FedaXL1/GDz/8gJ49e+KHH374aF5ra2sAwIULF/DkyROFsTequHbtGgQCAWrVqlUs5dHnj0ENFYv3ZzUUxIefnoHsZvAKFSooLLI3f/58CAQC2NvbQyQS4datWwgODkbDhg3RsWNH+XkDBgzA5s2b4e3tDW9vb8TExCAwMBDdu3dX6n7KysrC4cOH0bFjR+jp6RXyTgko/PMGgFWrVqFOnTowNDREZGSk/Dm+P6upf//+2LFjB0aOHIkhQ4ZAS0sLO3fuxP379+Hr6ys/ryDP28jICG5ubli6dCmysrLQsGFDPH78GMuWLYOjoyMaNmyo+heiHCjss46IiIC3tzdq1aqFvn374saNG/JjlSpVkg/4TklJwbJly+Dk5AQdHR3cuHEDa9asgY+Pj8I1U1NT5dP0X758iaSkJBw6dAhA9hg7AwMDJCYmYvjw4ejRowfq1KmDrKwsXLx4ESEhIejfvz+MjIxU/CpQWcGghj5rdevWxZ9//omtW7ciLS0N1atXh4eHB8aOHasw5kYkEmHjxo3w9/fHmDFjoKurC3d3d4U/hDnOnTuHuLg4rk3ziYnFYsyfPx8xMTEwNjZGjx494O3trTAotEGDBli/fj2WL18OPz8/SCQSWFpaYsWKFQrdkQV93vPmzcOqVauwbds2BAUFwcjICJ07d8a4ceNUWv+E8nbz5k0kJiYiMTERX3/9tcKxZs2aISQkBED24PAHDx5g165dSElJQd26dTFjxgyFIBfIHjw+btw4hbSc95s2bYKzszN0dHRgYWGBDRs24M2bN9DV1YWZmRlmzZqFXr16ldzN0mdHIOMKVERERKQG+FGFiIiI1AKDGiIiIlILDGqIiIhILTCoISIiIrXAoIaIiIjUAoMaIiIiUgsMaoiIiEgtMKghIiIitcCghugzs2vXLtjY2MhfDRo0gKurK6ZOnYo3b958kjq0b98eU6ZMkb+/dOkSbGxscOnSpUKV888//2DZsmW57qitqilTphRo48MhQ4ZgyJAhRbpG+/btMWLEiCLlza/M97+2RFR8uE0C0WcqICAAdevWRVpaGq5evYrg4GBcvnxZvhnnp2RnZ4etW7fK9+0pqOvXr2P58uXo3bs3RCJRCdWOiCgbgxqiz5SVlZV8J/LmzZtDIpFg5cqVOHbsGHr06JFrntTU1BLZpLNSpUpwcHAo9nKJiIoTu5+IyoicoOLVq1cAsrtfHB0dcf/+fXz77bdwdHTE0KFDAQAZGRlYuXIlunTpgoYNG6J58+aYOnUqYmNjFcrMzMzEgg
UL0LJlS9jb22PgwIG4deuW0rXz6n66efMmRo4cCWdnZzRq1AgdO3bE3LlzAQDLli3DggULAAAdOnSQd6e9X8bBgwfRv39/ODg4wNHREV5eXrhz547S9Xft2gU3Nzc0bNgQX375JUJDQ4v0NcyxfPly9OvXD82aNUOTJk3Qu3dvbN++HXlthXf06FF0794djRo1QocOHbBp0yalc5KSkjB//ny0b98eDRs2ROvWrTF37lykpKSoVFciKji21BCVEU+fPgUAGBgYyNMyMzMxatQoDBgwAMOHD4dEIoFUKoW3tzeuXbsGLy8vNGnSBC9fvsSyZctw69Yt7Ny5E7q6ugCAn376CaGhofj222/RsmVLPHz4ED4+PkhOTv5ofc6ePYtRo0ahbt26mDJlCkxMTPDy5UucP38eANCvXz8kJCQgJCQEy5cvR7Vq1QBA3oW1evVqLFmyBH369MGoUaOQmZmJdevWYdCgQdi+fbv8vF27dmHq1Kno0KEDpkyZgsTERCxfvhwZGRlF3mn75cuX6N+/P0xNTQEAN27cgL+/P968eQMfHx+Fc+/evYt58+bBx8cHRkZG2LdvH+bOnYvMzEx4eXkByG4hGzx4MF6/fo2RI0fCxsYGDx8+RFBQEB48eIANGzZAIBAUqa5EVAgyIvqs7Ny5U2ZtbS27ceOGLDMzU5aUlCQ7efKkrHnz5jJHR0dZdHS0TCaTySZPniyztraW7dixQyH//v37ZdbW1rLDhw8rpN+6dUtmbW0t+/3332UymUz26NEjmbW1tWzevHkK5+3du1dmbW0tmzx5sjzt4sWLMmtra9nFixflaR07dpR17NhRlpaWlue9/PrrrzJra2vZ8+fPFdJfvXola9CggWzOnDkK6UlJSbKWLVvKxo0bJ5PJZDKJRCJr1aqVrHfv3jKpVCo/78WLFzI7OztZu3bt8rx2jsGDB8sGDx6c53GJRCLLzMyULV++XNasWTOF67Rr105mY2Mju3v3rkKeYcOGyZo0aSJLSUmRyWQyWXBwsKx+/fqyW7duKZx36NAhmbW1tezUqVMKZb7/tSWi4sPuJ6LP1FdffQU7Ozs0adIEI0aMgJGREdauXQsjIyOF89zc3BTenzx5EiKRCO3atUNWVpb8ZWtri2rVquHy5csAIO8G6t69u0L+L7/8Elpa+TfiRkZG4tmzZ/Dw8ICOjk6h7+3cuXPIyspCz549Feqoo6MDJycneR0jIyPx9u1buLu7K7R01KxZE46OjoW+bo4LFy5g6NCh+OKLL2Braws7OzsEBQUhPj4eMTExCudaWVmhfv36Cmnu7u5ISkrC7du3AWR/za2srGBra6twP61atYJAIJDfDxGVLHY/EX2m5s+fD0tLS2hpacHQ0BDGxsZK5+jp6aFSpUoKaTExMRCLxWjYsGGu5cbFxQEA4uPjAUDeLZRDS0sL+vr6+dYtZ2xO9erVC3IrSt69ewcA8PDwyPV4TrdSTl0/DORy0l6+fFnoa9+6dQteXl5o1qwZ5syZgxo1akBbWxvHjh3D6tWrkZaWpnSd3K4N/Pc1jImJwdOnT2FnZ5frNXPug4hKFoMaos+UpaWlfPZTXnIbp1G1alXo6+vj119/zTVPxYoVAUAeuERHRysEJ1lZWfI/1nnJGddT1HVzqlatCgAICgqSj2vJ77ycIOh9uaUVxIEDB6ClpYXg4GCFVqZjx47len5+1875GlatWhU6OjqYN29ermXk3AcRlSwGNURqpm3btjhw4ACkUins7e3zPM/Z2RkAsG/fPoVWnb/++gtZWVn5XsPCwgJmZmbYuXMnhg0bBqFQmOt5Oenp6ekK6a1atYKWlhaePXum1H324XWqVauG/fv3Y9iwYfIg7uXLl7h+/XqurVcfIxAIoKmpqTDIOC0tDXv37s31/IcPH+LevXsKXVD79+9HxYoV5S0zbdu2RXBwMPT19VG7du1C14mIigeDGiI1061bN+zbtw/ff/89hgwZgsaNG0NbWx
uvX7/GpUuX0KFDB3Tq1AmWlpbo0aMHNm7cCC0tLbRo0QIPHz7EunXrlLq0cjN9+nSMGjUKX331FYYOHQoTExNERUXh7Nmz+PnnnwEA1tbWAICNGzeid+/e0NLSgoWFBWrVqoWxY8diyZIleP78OVxdXSESifDu3TuEhYVBT08PY8eOhYaGBsaNG4dp06Zh9OjR+OqrryAWi7F8+fJcu4UKok2bNvjtt98wceJE9O/fH/Hx8Vi3bl2egZmxsTFGjRoFHx8fVKtWDXv37sX58+fh6+srXxPI09MTR44cweDBgzF06FDY2NhAKpUiKioK586dw7fffptvgElExYNBDZGa0dTUxKpVq7Bp0ybs2bMHa9asgaamJmrUqAEnJyd5oAEAc+fOhZGREXbv3o2QkBDY2tpi2bJlmDBhwkev07p1a2zevBkrVqyAv78/0tPTUaNGDYWtC5ydnTFixAjs3r0b27dvh1QqxaZNm+TplpaW2LRpEw4cOICMjAxUq1YNDRs2xMCBA+Vl9OvXDwDw66+/wsfHBzVr1sSIESNw5cqVIg3AdXFxwbx587B27VqMHDkS1atXx1dffQUDAwP873//Uzrf1tYWffr0wbJly/DkyRMYGxtj6tSp8jWBAKBChQr4/fffsWbNGmzduhUvXryArq4uTExM0KJFC9SsWbPQ9SSiwhPIZHmsNkVERERUhnBKNxEREakFBjVERESkFhjUEBERkVpgUENERERqgUENERERqQUGNURERKQWGNQQERGRWmBQQ0RERGqBQQ0RERGpBQY1REREpBYY1BAREZFa+D/CdhBolX930AAAAABJRU5ErkJggg==", + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [18280/18280 23:32, Epoch 10/10]\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
EpochTraining LossValidation LossAccuracyMacro F1Weighted F1
10.3401000.3432000.8962440.6556610.879469
20.1783000.2240330.9308900.8597720.925342
30.1542000.2080340.9412840.8870120.939485
40.1212000.2166600.9403720.8807160.939431
50.0999000.2542550.9405540.8890880.938300
60.0658000.2674290.9427430.8976820.942815
70.0612000.2825090.9454780.8987970.943881
80.0368000.3017810.9438370.9038160.944163
90.0354000.3170260.9425600.9022410.942071
100.0142000.3132590.9467540.9049550.946129

" + ], "text/plain": [ - "

" + "" ] }, "metadata": {}, "output_type": "display_data" - } - ], - "source": [ - "cc.plot_conf_mat(\n", - " conf_mat_dict={\"Geneformer\": all_metrics_test[\"conf_matrix\"]},\n", - " output_directory=output_dir,\n", - " output_prefix=output_prefix,\n", - " custom_class_order=[\"nf\",\"hcm\",\"dcm\"],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "0038d701-ab94-46d2-b390-803be0850019", - "metadata": {}, - "outputs": [ + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To 
copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, { "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [457/457 00:11]\n", + "
\n", + " " + ], "text/plain": [ - "
" + "" ] }, "metadata": {}, "output_type": "display_data" }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ were not used when initializing BertForSequenceClassification: ['cls.predictions.transform.LayerNorm.weight', 'cls.predictions.decoder.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight']\n", + "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ and are newly initialized: ['bert.pooler.dense.weight', 'bert.pooler.dense.bias', 'classifier.weight', 'classifier.bias']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "liver\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, 
dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA8MAAAQrCAYAAACoxX5XAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAACRsUlEQVR4nOzdd5hV9b228Wd6LwwDQx167yhNQRSwgF2jGCMEjZpoNMbe8nrUJJaYxNiSc+wlGhVRbEksqCBKlya9dwaY3ut+//CwD4Q9ixlmwffHzP05l9c1MHv23HCYMA9rr7XCAoFAQAAAAAAANCHh1gEAAAAAABxrjGEAAAAAQJPDGAYAAAAANDmMYQAAAABAk8MYBgAAAAA0OYxhAAAAAECTwxgGAAAAADQ5jGEAAAAAQJMTaR0ANHWBQECVlZWqqamxTgEAAAgpPDxcUVFRCgsLs04BfMMYBoyUlJQoPz9fhYWFqq6uts4BAADwFBERoaSkJKWkpCg+Pt46B2iwsEAgELCOAJqawsJCbd++XVFRUUpOTlZCQoLCw8P511YAAOCcQCCgmpoaFRcXq6CgQJWVlWrXrp2SkpKs04AGYQwDx1hJSYm2bNmi5ORktWnThgEMAACOG4FAQDt37lRBQYE6dOjAEWIc17iAFnCM5efnKyoqiiEMAACOO2FhYWrTpo2ioqKUn59vnQM0CGMYOIYCgYAKCwuVnJzMEAYAAMelsLAwJScnq7CwULzIFMczxjBwDFVWVqq6uloJCQnWKQAAAEcsPj5e1dXVqqystE4BjhhjGDiG9t8+KTycLz0AAHD8ioiIkCRuDYnjGt+RAwZ4iTQAADie8b0MGgPGMAAAAACgyWEMAwAAAACaHMYwAAAAAKDJYQwDAAAAAJocxjAAACGMGTNGPXr0UI8ePbR9+/ZD3j9p0qTg++fNm2dQCAAAGoIxDAAAAABochjDAAAAAIAmhzEMAAAAAGhyIq0DAAA4Hr322mvWCQAAoAE4MgwAAAAAaHIYwwAAAACAJoeXSQMAmpTq6mq9++67+vDDD7Vu3ToVFxerRYsW6tevny655BKdfPLJdXqeSZMmaf78+ZKkV199VcOGDQv5uMrKSv3zn//UZ599plWrViknJ0eVlZVKTU1Vs2bN1Lp1aw0fPlwjR45U9+7dD/t5N2zYoPfff1/ffvutdu7cqYKCAiUmJqp9+/YaOXKkLrvsMmVkZBz2eQoLCzVz5kzNnz9fq1at0tatW1VcXKzo6Gg1b95c/fv317hx43TmmWcqPLxu/3Y+Z84cffjhh1q+fLl27dql0tJSJSYmKi0tTenp6Ro8eLBGjhypgQMHKioqyvO5cnNz9d577+nrr7/Wxo0blZOTo5iYGLVs2VLDhg3TRRddpH79+tWpCwCAUMICgUDAOgJoKsrKyrRp0yZ16tRJsbGx1jlAk5OVlaXrrrtOK1asqPUxl1xyie677z6dddZZ2rFjhyRpxowZateu3UGPq8sY3rRpk375y19qw4YNder79NNP1aFDh5Dvq6io0O9//3tNnTpV1dXVtT5HbGysbr/9dl1xxRWen+fWW29VRUXFYZt69eqlp59++pBf/4GKi4t122236Ysvvjjs80nS7373O11yySW1vv/111/X448/rsLCwlofExYWposuukj333+/oqOj6/R5AfiH72nQGHBkGADQJOTl5emnP/2pNm3aFPy5jh07ql+/foqKitKqVau0atUqTZ06VfHx8Q3+fEVFRbryyiu1a9cuSVJ4eLh69eqlLl26KD4+XmVlZcrKytLq1auVm5vr+VwlJSX62c9+pu+++y74c+3atVPfvn2VkpKi/Px8LV68WFlZWSorK9Nvf/tbFRUV6Re/+EXI58vOzg4O4VatWqlr165KT09XbGysSkpKtG
HDBq1cuVKBQECrVq3ST37yE02fPl3NmjUL+Xx33HHHQUO4Q4cO6tWrl1JSUlRVVaWcnBytXbs2+I8LXh566CG98sorwR+npqZq4MCBatmypcrLy7Vq1SqtXbtWgUBA06ZN0549e/Tss8/W+eg1AAD7MYYBAE3CI488EhzCMTEx+t3vfqfzzjvvoMd8++23uuWWW/TKK68c9mW8h/POO+8Eh3DXrl311FNPqXPnzoc8LhAIaPny5Xr33XdrPcL5wAMPBIdwZmamHnjgAZ100kkHPaa6ulpvvfWWHn74YVVUVOjJJ5/UsGHDNGjQoEOeLyMjQ7feeqvOPPPMWo9Eb9u2Tffff79mz56t3bt3649//KN+//vfH/K4VatW6fPPP5ckxcfH6y9/+YtGjx5d63N+9NFHat68ecj3v/POO8EhHB8frzvuuEMXX3zxIb8vc+fO1R133KGsrCx9/fXXeuGFF3TNNdeEfE4AAGrDP6MCABq9jRs36r333gv+ONQQlqSTTjpJf/3rXxUeHq7KysoGfc4Dj+Lee++9IYew9MPLffv376/7779frVu3PuT9Cxcu1PTp0yX9cBT3jTfeOGQIS1JERIQuv/xy3X///ZJ+GMfPPPNMyM85ZswYXXvttbUOYUlq3769/vu//1s9evSQJH344YfKz88/5HGLFi0Kvj158uRah/D+57zuuus0ZsyYQ95XVFSkRx99NPhrefbZZ/XjH/845D8QDB8+XC+99JJiYmIkSc8//7xKS0tr/bwAAITCGAYANHrvvPNO8O2BAweGHML7DR48WOeee26DP2dRUVHw7bS0tCN+npdeein49i233KIWLVp4Pv6iiy4KDu/Zs2crJyfniD93VFRU8PeivLz8oOG7n1+/zmnTpqmgoECSdOGFF2rIkCGej+/SpYsuuOACST+8BP7rr78+4s8NAGiaeJk0AKDRmzdvXvBtryG83wUXXKD333+/QZ/zwKO8b7zxhh588MF6P0dVVZW+/fZbSVJkZKTOPPPMw35MWFiYhg0bpo0bNyoQCGjx4sUaO3ZsrY8vKCjQkiVLtH79euXl5amkpEQ1NTXB92/cuDH49qpVqw45qnvgr3P69Om65JJLjuic61mzZgXfPvvss+v0McOHD9dbb70l6Ycj1GeccUa9Py8AoOliDAMAGrVAIKA1a9YEfzxgwIDDfkz//v0VFhamhtxwYcKECcEj0m+99ZaWL1+uCy64QCNHjlSXLl3q9Bxr1qxRSUmJJCk6Olp/+MMf6vRxy5cvD769e/fukI/Zfx7wJ598UqerSksKeaGv0aNHKz4+XiUlJVq5cqXOOussXXTRRTr11FPVp0+fOp97vXjx4uDb06dPD56H7OXAX9v+87MBAKgrxjAAoFErLCw86PzfNm3aHPZjEhMTlZSUFHzZ7pE4+eSTNWXKFL388suSpJUrV2rlypWSpGbNmmnw4MEaOnSozjzzzJDnCkvSnj17gm+XlJTo9ddfr3dHqPN8V65cqSlTpoR8n5fi4uJDfi41NVUPP/ywbrvtNlVWViorK0t/+9vf9Le//U2xsbHq37+/hgwZojFjxqhv3761Pu+Bz30kR+Ub8v8rAEDTxDnDAIBGbf+R1f3qej/MuLi4Bn/uu+++W3/7298OuaJzbm6uZsyYoYcfflinnXaabrzxxpC3HfK6z25d/ec9iSsqKnTjjTcGh3B6erpuuOEGvfbaa5o5c6aWLFmi1atXa82aNVqzZo0efvjh4MfWdqT8rLPO0rRp03TWWWcddCS4rKxM8+fP1zPPPKOLL75YF110kRYsWHDIxx943rFfv04AAA6HI8MAgEbtP89fLSsrq9M5rX5dnXjMmDEaM2aMsrKyNG/ePC1cuFCLFi3S+vXrJf0wMD/99FPNnz9fb775pjp16hSyvVevXsGrSjfEJ598ou3bt0v64erU06ZNU3p6eq2PD3U0OJQePXroiSeeUFFRkRYuXKgFCxZo0aJF+v7774
NH5lesWKHJkyfrz3/+s8aPHx/82P/8h4eFCxcqKSmpvr80AADqhSPDAIBGLSkp6aCjlTt37jzsxxQVFflyVPZAGRkZOu+88/Tggw/q448/1syZM3XTTTcFB29eXp4eeeSRgz7mwPvxbtu27aALWx2pOXPmBN+eMmWK5xCW6vb7daDExESdeuqpuv322/Xmm29q7ty5euSRR9S2bVtJUk1NjR544AGVlZUFPyY5OfmgWyht3ry5Xp8TAIAjwRgGADRqYWFhwXvlStLSpUsP+zHLli1r0MWz6qJVq1a6/vrr9dvf/jb4c998881BF7Pq1atXcCQWFRUddJGpI3Xgechdu3Y97ONDvay5PhITE3XhhRfqlVdeCf5acnNzD/m19O/fP/j27NmzG/Q5AQCoC8YwAKDRGzZsWPDtDz744LCPf++9945mzkFOPfXU4NuVlZXKy8sL/jg2NlbDhw8P/viVV15p8OcLD/+/v/oPPDobyvfff3/Qlakbon379urWrVvwx9nZ2Qe9/8DfhzfffFPl5eW+fF4AAGrDGAYANHoXX3xx8O0lS5Z4DuLvvvtOH330UYM/Z05OTp0ed+AtgcLDw5WamnrQ+6+55prg25988onefffdOjfs3bv3kJ9r37598O0ZM2bU+rGlpaW67777Dvs56vrrrKqqOuiodFpa2kHvv+yyy5ScnCzph1sm3X///XU+Op+Tk8MFtAAA9cYYBgA0el26dNH5558f/PFvfvObkIN4zpw5uv7661VTU1Pn++PW5rLLLtMtt9yimTNn1nof3w0bNujOO+8M/njEiBEHnTsrSUOHDtWFF14Y/PE999yjRx99NOQ9f6Ufrhb9+eef65e//KWuu+66Q95/4BHY6dOn68UXXzxkSG7ZskVXXXWVVqxYcdiLjT322GO6/PLL9d5779V6q6acnBzdc889wXGemJiowYMHH/SYpKQk3X333cEfv/vuu/rFL36hDRs2hHzOQCCgxYsX68EHH9SYMWMOe5QbAID/xNWkAQBNwt13360lS5Zoy5YtKi8v1+23365nnnlGAwYMUEREhFavXh28D/BPf/pTff755yFvd1RXVVVV+vjjj/Xxxx8rNjZWPXr0UPv27ZWQkKCCggJt3bpVK1asCD4+NjZWd9xxR8jnevDBB7V3717Nnj1bgUBAL774ol577TX169dPmZmZiomJUVFRkbZu3aq1a9cGX2Lcp0+fQ55r1KhRGjp0qObPn69AIKBHH31Ur7/+uvr06aPExERt2bJFixcvVnV1tTIyMjR58mQ99thjtf46A4GAFi1apEWLFikiIkKdOnVSly5dlJKSorKyMu3evVuLFy8+6F7Pd955Z8hbXF100UXatm2b/vrXv0qSvvrqK82cOVPdunVT9+7dlZCQoNLSUmVlZWnVqlXcWxgA0CCMYQBAk9CsWTO98soruv7664Ojd/PmzYdcufiiiy7Sbbfdps8//7xBny8hISH4dllZmZYuXVrrxbvatWunxx57TD179gz5/ujoaD377LN6+umn9dJLL6m0tFSVlZX67rvv9N1334X8mKioKA0cODDk+x5//HFde+21wTG+ffv24O2W9uvataueeOIJLVu2rM6/zurqaq1fvz5426hQj73rrrt06aWX1vp8N910k7p166aHH35Ye/bsUSAQ0Nq1a7V27dpaP6Z///4NPpIPAGh6GMMAgCajdevWmjp1qt599119+OGHWrt2rUpKStSiRQv17dtXF198sUaPHu3L55o+fbqWLFmiefPmadmyZdq0aZP27NmjsrIyxcbGqkWLFurZs6fGjBmjCRMmHPLy6P8UERGhm266SZMmTdL06dP17bffasOGDcrNzVVVVZUSEhLUtm1bde/eXcOGDdPo0aMPOS93v/T0dL355puaOnWqPv74Y61bt06lpaVq3ry5OnXqpAkTJujcc89VXFzcYcfw//t//0+XX365vv32Wy1ZskTr16/Xrl27VFxcrI
iICKWmpqpbt246+eSTdf755x90u6jaTJgwQePGjdPHH3+s2bNna/ny5crJyVFJSYni4uKUkZGhLl266IQTTtDo0aMPujczAAB1FRY42veOABBUVlamTZs2qVOnTiFfIggAAHA84HsaNAZcQAsAAAAA0OQwhgEAAAAATQ5jGAAAAADQ5DCGAQAAAABNDmMYAAAAANDkMIYBAAAAAE0OYxgAAAAA0OQwhgEAAAAATQ5jGAAAAADQ5DCGAQAAAABNDmMYAAAAANDkMIYBAAAAAE0OYxgAAAAA0OQwhgEAAAAATQ5jGAAAAADQ5DCGAQAAAABNDmMYAAAAANDkMIYBAAAAAE0OYxgAAAAA0OQwhgEAAAAATQ5jGAAAAADQ5DCGAQAAAABNDmMYAHy2YcMG/eY3v9EZZ5yhAQMGqEePHurRo4cmTZpknYZGaN68ecE/Y0899ZR1DtCk8PUHHN8irQMAeOuZ2tM64ZhbnbfaOuGILVy4UFdddZXKy8utU5zx8Q0/s0445s5++gXrBDRSO/YttU4w0TZ9gHUCgEaIMQwAPnrwwQeDQ/i8887T8OHDlZKSIklKTU01LAMAAMCBGMMA4JPdu3drzZo1kqSTTz5Zjz32mHERAAAAasM5wwDgk127dgXf7tOnj2EJAAAADocxDAA+qaioCL4dHR1tWAIAAIDD4WXSAJq0efPmafLkyZKkG264QTfeeKOysrL0+uuva8aMGdq5c6fCwsLUvn17nX766ZoyZYoSExMPeo4xY8Zox44dB/3c008/raeffvqgn9v/EmrgaKvvn+FQlixZog8++EALFizQnj17VFRUpPj4eLVv316DBw/WuHHjNGzYMIWFhQU/Zvv27Ro7dqwk6cILL9Qjjzyi7Oxsvf766/rss8+0c+dOhYeHq2PHjrr00kt14YUXKjLy/74V2blzp15//XXNnDlTO3bsUHh4uLp166aJEyfqggsuOOhzAcfC119/rTfffFNLly5Vfn6+mjdvrr59++qyyy7TyJEj6/1c//73v/Xdd99p7969Ki0tVWJiojp16qTBgwfrrLPOUv/+/Q/6mFB/R23bti34dbJ7927Fxsaqa9euuuKKK3TGGWcc9HWyYcMGvfbaa5o7d6527dqlmJgY9e7dW5MmTQp+rQJNGWMYAA4we/Zs3XrrrcrLyzvo51evXq3Vq1frgw8+0KuvvqpWrVrZBAKH0dA/wwUFBbrrrrs0Y8aMkO9bsWKFVqxYoddee02vvfaahg4dWmvLkiVLdMMNN2jv3r0H/fyyZcu0bNkyffHFF3ryyScVFRWlmTNn6pZbblFRUdFBj128eLEWL16suXPn6tFHH63j7wLQMDU1Nbrvvvs0derUg35+165d2rVrlz777DNNmjRJp59++mGfa/fu3br55pv13XffHfK+vLy84J/xF154QV988YXatm1b63N98cUXuv322w/6OikpKdH8+fM1f/58XX755brvvvsUFhamd955R/fff78qKyuDjy0rK9OcOXM0Z84cXX/99brpppvq8tsBNFqMYQD4X6tWrdKLL76oyspKXXjhhTrhhBOUkJCgzZs36x//+If27NmjLVu26M4779Qrr7wS/LgHH3xQZWVlWrt2rZ544glJ0oQJE3T22Wdb/VLQRB3pn+H9CgoKNHHiRG3cuFGSFBsbq/Hjx2vgwIFKTU1VcXGx1q9fr2+++UZr1qxRIBCotWXXrl267rrrlJ+fr3PPPVfDhw9XfHy81qxZozfeeEMFBQX64osv9Nxzz2nkyJG6/vrrFRkZqYkTJ2rQoEGKjo7WkiVL9NZbb6m8vFzTp0/X8OHDdeGFFx613z9gv4ceeig4hCMiInTOOedo2LBhio6O1qpVqzRt2jS99tprysrK8nye7du3a+LEidq3b58kKTk5WRMmTFDfvn2VlJSkgoICrV27VrNmzdKWLVs8v6ZWrlyp559/XpI0ceJEDR48WFFRUVq8eLHefvttlZeX64
033tDAgQMVHx+ve++9VykpKbrooovUq1cvhYWFae7cuZo+fbqqq6v117/+VcOHD9ewYcN8+l0Djj+MYQD4XzNmzFDLli314osvqlu3bge977LLLtOPfvQj7dixQ3PnztX333+vvn37SlLwpXJJSUnBx3fu3Fnjxo07dvGAjvzP8H533313cAj369dPzzzzjDIyMkJ+ru+//17NmjWrtWXu3LlKSkrS66+/rkGDBgV/fsKECTrnnHN08cUXq7y8XC+//LLee+89tWzZUi+99JI6duwYfOzZZ5+tMWPGaMqUKZKkF198kTGMo27RokX6+9//LkmKi4vTc889pyFDhgTff+6552rKlCn66U9/qk8//bTW56mpqdGvfvWr4BAePXq0/vjHPyo5OTnk4+fMmVPr+6Qfjgq3bt1aL730kjp16hT8+bPPPltjx47VlVdeqUAgoKeeekqFhYXq1auXXnjhBTVv3jz42PPOO0+DBw/WvffeK+mHrynGMJoyLqAFAAf4wx/+cMiIkKS0tDT94he/CP541qxZxzILqLMj/TO8bNkyff7555Kkli1b6rnnnqt1CEtS3759PV/OKUn33nvvQUN4v27duuncc8+VJOXn52vr1q167LHHDhrC+40YMUIjRoyQJK1du1a7d+/2/JxAQ7300kvBI7S33HLLQUN4v5YtW+rxxx9XRERErc/zySefaMWKFZKknj176umnn/YcuyNGjPB8vyQ9+uijBw3hAz92+PDhkqRt27apuLhYTz755EFDeL8f/ehHyszMlCR9++23qqqq8vycQGPGGAaA/9WzZ8/gN92hHPi+9evXH4skoF4a8mf4/fffD759zTXXeB71rYtmzZoFB28oJ554YvDt3r17H/Rjr8euW7euQV2Al4qKCn311VeSpMTERE2cOLHWx/bs2VMnn3xyre8/8GvqV7/6VYPvMtCrVy/Po7gHfp2ceuqpwcHr9diKigpt3bq1QV3A8YwxDAD/K9QRrAMdeJQsPz//aOcA9daQP8MLFy4Mvu3HS/z79et30JWi/1N6enrw7QEDBng+14GPLSgoaHAbUJvVq1cHLzg1ePBgxcTEeD7e6x+f9n9NRUdHa9SoUQ1uGzhwoOf7+ZoC6o8xDAD/63BHwg78V/0D7ykMuKIhf4b3v/w4Pj5ebdq0OaYt9XlseXl5w8IAD3v27Am+3aFDh8M+vrajr0VFRSosLAw+jx/3nudrCvAfYxgA/ld4OP+TiONbQ/4M779VS3x8/DFv4WsPriguLg6+HRcXd9jH1/b1cuDz8DUFuIuvFAAAoMTEREk/3LMUaKoSEhKCb5eWlh728bV9vRz4PHxNAe5iDAMAgOD5xCUlJdq5c6dxDWCjZcuWwbfrcmGp2h6TmJgYvN3eli1bOLUGcBRjGAAAHHT7mP23WAKamp49ewbPp120aNFhz6edM2dOre878IrNX3/9tX+RAHzDGAYAADr//PODbz///PPKy8uziwGMREdHa/To0ZJ+OI9+6tSptT527dq1+uabb2p9/4FfU08++SRHhwEHMYYBAID69++vsWPHSpKysrJ0zTXXKCsrq9bHr1ixQjt27DhWecAxc9VVVyksLEyS9Kc//emg247tt2/fPt18882qrq6u9XnOPPNM9enTR9IPt2y68cYbPW9jNHfuXG5zBBxjtd8AEAAANCkPPfSQJk6cqM2bN2vZsmU644wzNH78eA0aNEgpKSkqKSnRxo0b9c0332jlypV69dVX1bZtW+tswFeDBw/WFVdcoddee00lJSWaPHmyzj33XA0bNkzR0dFatWqV3nnnHeXl5emMM87Qp59+GvJ5wsPD9cQTT2jixInKzs7WV199pXHjxmn8+PHq27evkpKSVFhYqHXr1mnmzJnavHmzZsyYoeTk5GP8KwaaLsYwAACQJKWmpurNN9/U7bffrq+//lplZWV677339N5774V8PLdvQWN1zz33qLS0VO+8846qq6s1ffp0TZ
8+/aDHTJ48WePGjat1DEtS+/bt9fbbb+vXv/61li9frvz8fL355pu1Pp6vKeDYYgwDjludt9o6AWiQs59+wToB9dCsWTM9//zzmjdvnj788EMtXLhQe/bsUVlZmRITE5WZmalBgwbpzDPPDF4gCMdO2/QB1glNQnh4uH7/+9/rzDPP1D/+8Q8tXbpUBQUFat68ufr166eJEydq1KhRmjdv3mGfq127dpo6daq++OIL/etf/9KSJUuUnZ2tyspKJSUlqWPHjhoyZIgmTJigNm3aHINfHYD9wgKBQMA6AmgqysrKtGnTJnXq1EmxsbHWOQAAAEeE72nQGPBaDAAAAABAk8MYBgAAAAA0OYxhAAAAAECTwxgGAAAAADQ5jGEAAAAAQJPDGAYAAAAANDmMYQAAAABAk8MYBgAAAAA0OYxhAAAAAECTwxgGAAAAADQ5jGHAQCAQsE4AAAA4Ynwvg8aAMQwcQ+HhP3zJ1dTUGJcAAAAcuf3fy+z/3gY4HvGnFziGIiMjFRYWpvLycusUAACAI1ZeXq6wsDBFRkZapwBHjDEMHEPh4eGKi4tTcXGxdQoAAMARKy4uVlxcHEeGcVzjTy9wjCUmJqq4uFgVFRXWKQAAAPVWUVGh4uJiJSYmWqcADcIYBo6xlJQURUZGavv27aqurrbOAQAAqLPq6mpt375dkZGRSklJsc4BGiQsUMdLwb09fOTRbjkmLp072zoBUHl5uTZv3izph3GcmJioiIgIhYWF2YYBAAD8h0AgoOrqahUWFqqgoECS1LFjR8XExBiXAQ1T9zPew/kmHfBLTEyMOnXqpLy8POXn5ys3N9c6CQAAwFNkZKSaNWum1NRURUdHW+cADVbnMRwWxiuqAT9FR0erZcuWatGihaqqqnjJNAAAcFZERETwrhhAY1HnMRweyRgGjoawsDBFRUUpKirKOgUAAABoMur+MmmODAMAAAAAGom6HxmOYAwDAAAAABqHup8zzA21AQAAAACNBFeTBgAAAAA0OfV4mXTE0ewAAAAAAOCYqcetlTgyDAAAAABoHOo+hjkyDAAAAABoJOpxAS2ODAMAAAAAGgeuJg0AAAAAaHLq8TJpxjAAAAAAoHGoxwW0GMMAAAAAgMaBI8MAAAAAgCaHc4YBAAAAAE0Ot1YCAAAAADQ59ThnmFsrAQAAAAAaB+4zDAAAAABocniZNAAAAACgyanzGMb/mTRpkubPn1/nx8+YMUPt2rU7ikUAAAAAgPrgyDAAAAAAoMnhAloN9Mwzzxz2Mc2bNz8GJQAAAACAuuLIcAONGzfOOgEAAAAAUE/1uJp0+NHsAAAAAADgmKn7BbR4mTQAAAAAoJGo8xgO52XSAAAAAIBGou5HhsM5MhzKz3/+c61cuVK5ubmKi4tTy5YtNWjQIJ1zzjkaPny4dR4AAAAAIASODDfQV199FXy7srJSBQUFWr9+vaZOnarhw4frscceU8uWLe0CAQAAAACHqMc5w43jAlpjx471fP+MGTPq9DwpKSk66aST1LdvX2VkZCgiIkJZWVmaO3euZs2apZqaGs2dO1eXXXaZ3nrrLbVo0cKPfAAAAACAD8ICgUCgLg+cc/cdR7vlmPjN/EWe76/LGF68eLH69Omj6OjokO9fsWKFbrzxRu3YsUOSdMopp+i5556rfywAAAAA4Kio8xie+5u7j3bLMTH8dw8fk8+zadMmnXfeeaqoqJAkTZ06Vf379z8mnxsAAAAA4K3Or30OCwtvFP8dK506ddIFF1wQ/PGB5xYDAAAAAGzV+ZzhMC6gVW/Dhg3T22+/LUnasGGDcQ0AAAAAYL+6j2FurVRvzZo1C75dWFhoWAIAAAAAOBBHho+inJyc4NtJSUmGJQAAAACAA9V9DIdxZLi+5s2bF3y7U6dOhiUAAAAAgAPV/T7D4Y3jPsPHysaNG/X+++8Hf3zaaacZ1gAAAAAADlSPc4Z5mbQkvfrqq+rbt68GDx5c62NWrlypG264IXhbpZ
EjR2rAgAHHKhEAAAAAcBhcQKue5s6dq9///vfKzMzUiBEj1L17d6Wmpio8PFx79uzR3LlzNXPmTNXU1EiS2rZtq4ceesi4GgAAAABwoHqcM8zLpA+0detWbd261fMxI0eO1EMPPaSMjIxjVAUAAAAAqIt6nDPMkWFJuuuuu3Taaadp6dKlWr16tXJycpSbm6vKykolJiaqbdu2GjRokM455xwNHDjQOhcAAAAAEEKdx3A4t1aSJGVmZiozM1OXXHKJdQoAAAAA4AjV/cgwL5MGAAAAADQSXEALAAAAANDk1H0M8zJpAAAAAEAjwdWkAQAAAABNTt3HcCRHhgEAAAAAjUM9LqDFOcMAAAAAgMaBWysBAAAAAJqcuh8ZDuecYQAAAABA41CPC2jxMmkAAAAAQONQj1sr1f0gMgAAAAAALqv7GA7nyDAAAAAAoHGox5FhLqAFAAAAAGgc6nHOMBfQAgAAAAA0DvW4mjQvkwYAAAAANA7cZxgAAAAA0OTU/cgwL5MGAAAAADQSXEALAAAAANDk1OPWShwZBgAAAAA0DtxnGAAAAADQ5HDOMAAAAACgyeHIMAAAAACgyanHBbTqfhAZAAAAAACX1X0Mh3FkGAAAAADQOHBrJQAAAABAk8OtlQAAAAAATQ5HhgEAAAAATQ5HhgEAAAAATQ4X0AIAAAAANDl1H8OR3FoJAAAAANA4cGQYAAAAANDk1OMCWhwZBgAAAAA0DvW4gBZHhgEAAAAAjQNXkwYAAAAANDncZxgAAAAA0OTU4wJaHBkGAAAAADQOdR7D4ZEcGQYAAAAANA51v0Q0R4YBAAAAAI1E3Y8Mc84wAAAAAKCR4GrSAAAAAIAmp+4vk8YhPv/8c33wwQdavny59u3bp8TERGVmZmrcuHG67LLLlJSUZJ0IAAAAAAghLBAIBOrywNKs3Ue75ZiIy2jV4OcoKirSbbfdpi+//LLWx7Rq1UqPP/64Bg8e3ODPBwAAAADwV53HcNnerKPdckzEtsho0MdXVVXp2muv1TfffCNJSk9P1yWXXKKuXbsqPz9fH330kb777jtJUnJyst544w1169atwd0AAAAAAP/UeQyX5+w72i3HRExaeoM+/o033tADDzwgSeratateeeUVpacf/JyPPvqoXnzxRUnS4MGD9Y9//KNBnxMAAAAA4K86j+GK3Jyj3XJMRDdLO+KPra6u1ujRo7V3715J0rvvvqs+ffqEfNzFF1+sVatWSZKef/55jRo16og/LwAAAADAX3W+RHRYeHij+K8h5s+fHxzCQ4cODTmEJSkiIkKTJk0K/vjjjz9u0OcFAAAAAPir7leT5tZK+vrrr4Nvn3LKKZ6PHT16dPDtWbNmHbUmAAAAAED91X3hhoU1jv8aYO3atcG3+/Xr5/nY9PR0tW7dWpKUnZ2tnJzG8TJzAAAAAGgMGMP1sGnTpuDb7dq1O+zjD3zMxo0bG/S5AQAAAAD+qfvLpBs4JF0xduxYz/fPmDGj1vcVFhYG327WrNlhP1dqamrIjwUAAAAA2Kr7GFbjGMMNUVJSEnw7JibmsI8/8DHFxcVHpQkAAAAAUH/1GMONg9eRXwAAAABA01Dnc4YDjeS/hoiPjw++XV5eftjHH/iYhISEBn52AAAAAIBf6nxkONDQJdkIJCUlKT8/X5KUm5t72IGbl5d30McCAAAAANxQjyPDjeP/GqJTp07Bt7dv337Yxx/4mM6dOzfocwMAAAAA/FPnMVxT0zj+a4ju3bsH316+fLnnY/ft26ddu3ZJkpo3b660tLSGfXIAAAAAgG/qPoYDgUbxX0OMGjUq+PasWbM8Hztz5szg26NHj27Q5wUAAAAA+KvO5wzX1HDS8NChQ9WiRQvt3btX8+fP14oVK9SnT59DHlddXa3XXnst+OMJEyYcy0wAAAAAwGFwZLgeIiIidP311wd/fOeddyo7O/uQx/3xj3/UqlWrJEmDBw8+6IgyAAAAAMBeWCBQt4WYm5N/tF
uOiWZpKQ36+KqqKl177bX65ptvJEktWrTQJZdcoq5duyovL08ff/yxFi1aJOmHK0j/4x//ULdu3RrcDQAAAADwT53H8L69uUe75ZhIb9Gswc9RVFSk2267TV9++WWtj2nVqpUef/xxDR48uMGfDwAAAADgL8ZwA3z++ed6//33tXz5cmVnZyshIUGZmZk6/fTTddlll3FvYQAAAABwVJ3H8O7dh54bezxq1aq5dQIAAAAAwFidrybdwGtPAQAAAADgjDqP4arqmqPZAQAAAADAMVOPI8McGgYAAAAANA51HsMNvUcvAAAAAACuqPMYruZl0gAAAACARoIjwwAAAACAJqceR4YZwwAAAACAxoEjwwAAAACAJqfuR4ZrOGcYAAAAANA41P3IcA1HhgEAAAAAjUM97jN8NDMAAAAAADh26jyGq7i1EgAAAACgkajHkWEODQMAAAAAGod6XECLMQwAAAAAaBy4tRIAAAAAoMnhatIAAAAAgCaHl0kDAAAAAJocLqAFAAAAAGhyeJk0AAAAAKDJqccFtI5mBgAAAAAAx049zhmuOZodAAAAAAAcM/U4Z/hoZgAAAAAAcOxwzjAAAAAAoMnh1koAAAAAgCanHhfQYgwDAAAAABoHjgwDAAAAAJoczhkGAAAAADQ5HBkGAAAAADQ59bi1EmMYAAAAANA41OMCWkczAwAAAACAY6fuL5OuZg0DAAAAABoHbq0EAAAAAGhyuIAWAAAAAKDJ4ZxhAAAAAECTU48jwzVHswMAAAAAgGOmHrdWOpoZAAAAAAAcO3V/mTSvkwYAAAAANBL1eJn00cwAAAAAAODY4dZKAAAAAIAmh3OGHbN9+3aNHTu2zo8fOnSoXnvttaNYBAAAAACND0eGAQAAAABNDvcZdtiwYcM0efJkz8ekpqYemxgAAAAAaETqcQEt1vCx1qZNG40bN846AwAAAAAanXqcM8wYBgAAAAA0DnUew1XVRzMDAAAAAIBjhyPDAAAAAIAmp+5Hhjln+JhbtGiRfvSjH2nLli0qLS1VSkqKOnbsqOHDh+vSSy9VRkaGdSIAAAAAHJe4z7DDtm7detCP9+3bp3379mnhwoX6n//5H91www36+c9/rrCwMKNCAAAAADg+cWslR3Xp0kUnnXSSunbtqpSUFJWXl2vjxo369NNPtWnTJlVWVurxxx/Xzp079eCDD1rnAgAAAMBxJSxQx5OB7/7brKPdckzMf+cBz/fPmDHjGJWEVlRUpC1btqhPnz4h3x8IBPTKK6/okUceCZ7H/fjjj2vChAnHMhMAAAAAjmscGa6nSZMmaf78+b4818MPP6yLLrrooJ9LTEysdQhLUlhYmKZMmaKioiI99dRTkqRnnnmGMQwAAAAA9VDnMVzdSNaw9ZFfv1xzzTV66aWXVFRUpPXr12vbtm1q3769dRYAAAAAHBc4MlxP55xzjgYMGODLc3Xv3v2IPzYmJkYDBw7U7NmzJUkbN25kDAMAAABAHTW5I8MNNXHiROuEoNTU1ODbBQUFdiEAAAAAcJyp+5HhmqOZgSORm5sbfDs5OdmwBAAAAACOL7xM+jhVVlamJUuWBH/cqVMnuxgAAAAAOM7wMunj1HPPPafi4mJJUufOnZWZmWlcBAAAAADHj/C6PrAm0Dj+c1lxcbEef/xxZWdn1/qYQCCgl19+Wc8880zw5375y18eizwAAAAAaDQ4MuyQ6upq/fd//7eee+45nXDCCRo4cKAyMzOVlJSksrIybdq0SZ9++qk2btwY/JhLL71U55xzjmE1AAAAABx/OGfYQdXV1Zo/f77mz59f62Oio6N144036uqrrz6GZQAAAADQONR5DOPoS0xM1KuvvqqlS5dq6dKl2rp1q3Jzc5WXl6fIyEilpKSoW7duGj58uC666CKlpaVZJwMAAADAcanOY7iKWysddeHh4Ro2bJiGDRtmnQIAAAAAjVqdx3CAl0kDAAAAABqJehwZZg0DAAAAABoHLqAFAA
AAAGhyOGcYAAAAANDkcM4wAAAAAKDJ4WXSAAAAAIAmh5dJAwAAAACanLofGWYMAwAAAAAaCY4MAwAAAACaHM4ZBgAAAAA0OXUew5UcGQYAAAAANBIcGQYAAAAANDlhgQB3EAYAAAAANC3h1gEAAAAAABxrjGEAAAAAQJPDGAYAAAAANDmMYQAAAABAk8MYBgAAAAA0OYxhAAAAAECTwxgGAAAAADQ5jGEAAAAAQJPDGAYAAAAANDmMYQAAAABAkxNpHQA0Fj1Te1onAE740xUnWycAThh4/6+sEwAntE0fYJ0AhMSRYQAAAABwxPbt29WjRw/16NFDd911V8jHTJo0KfiYpm7MmDHq0aOHxowZU++P5cgwAAAAAOAg8+bN0/z58yVJF154odq1a2dc5D/GMAAAAADgIPPnz9fTTz8tSRo6dChjGAAAAABg67XXXrNOaBQ4ZxgAAAAA0OQwhgEAAAAATQ4vkwYAAADgrHnz5mny5MmSpBtuuEE33nijNm7cqNdff12zZ89WVlaWoqKi1LlzZ40fP16XX365oqOjQz7XmDFjtGPHDrVt21ZffPGFKioqNHXqVP373//Wpk2blJ2drdatW+uLL7445GMXLFigDz74QAsXLtTevXtVXl6utLQ09e/fX+ecc47OOOMMhYWFHfbXs3TpUr366qtauHChcnJylJqaqu7du+viiy/WhAkT6vR7MmnSpODFrdasWeP52OLiYr377rv6+uuvtWbNGuXm5kqS0tPT1b17d5100kk6++yz1bx5c0nSU089FTxXeL/9v/8H2v97GMrWrVv19ttva86cOdqxY4eKioqUnJysrl27auzYsbr00ksVFxd32F/n7t279eKLL2rmzJnavXu3YmNjlZmZGfz/c2xs7GGfwwtjGAAAAMBx45///KfuuecelZaWBn+utLRUS5Ys0ZIlSzR16lQ9//zzat26tefzbN++Xdddd53Wrl3r+biCggLdcccd+vLLLw953+7du7V79259+umnGjJkiJ588kmlpaXV+lxPP/20nnnmGdXU1AR/bs+ePdqzZ49mz56tf//737rllls8e+rjX//6l+6//37l5eUd8r4dO3Zox44d+vLLL/XZZ5/5ch5yTU2N/vKXv+iFF15QVVXVQe/Lzs5Wdna25s2bpxdffFHPPPOM+vbtW+tzffXVV7rllltUXFwc/LmysjLl5eVp2bJleu+99/Tss882qJcxDAAAAOC4sGLFCj377LOqqqrSOeecoxEjRig2Nlbr1q3TtGnTtHfvXq1fv14//elP9e677yoxMTHk81RUVOjGG2/U2rVrNXDgQJ155plq1aqVCgsLDxrHRUVF+vGPf6z169dL+uFo6IQJE9S1a1dFR0drx44d+vjjj7Vq1SotWLBAV155pd5++23FxMQc8jlffvllPfXUU8Efjx07VqeeeqoSEhK0YcMGTZs2TZ988okCgYAvv1dvvPGGHnjggeCPe/XqpXHjxikzM1MRERHKysrS0qVLNWvWrIM+bsKECerVq5c+/vhj/fOf/5Qk3XTTTerevftBjwt1VPbOO+/UBx98IElKSkrS+PHj1b9/fyUlJSk7O1uzZs0KHuWdPHmypk2bpk6dOh3yPEuWLNENN9ygyspKSVKfPn107rnnKiMjQ3v27NGHH36o77//XjfddFPwMUeCMQwAAADguPDll18qLi5Ozz//vIYNG3bQ+6666ipdffXVWrZsmbZs2aI///nPuu+++0I+z969e7V3717deuutuvbaa2v9fPfdd19wCF955ZW69dZbFRUVddBjrr76av3pT3/Sc889p9WrV+uvf/2rbr755oMes23bNv35z3+WJEVEROiPf/zjIS+Jvuqqq/Tzn/9cn376ad1+MzwsX75cDz30kCQpMjJS9957r3784x+HfBl3SUmJFi5cGPxxly5d1KVLF61atSr4cyeccMIhv9//6c033wwO4eHDh+vxxx8/5Cj5T37yE3
3++ef69a9/reLiYt1zzz36xz/+cdBjqqurdc899wRH7qRJk3TPPfcoPPz/Lnc1efJkPfroo3r55Zfr8LtROy6gBQAAAOC4ceutt4YcZikpKXryyScVHx8vSXrnnXeC58eGMnbsWM8hvHr1an388ceSpNNPP1133XXXIUNYksLCwnTbbbfphBNOkCS9/vrrqqioOOgxf//731VeXi7ph3EX6tzgxMREPf7440pISKi1qa6eeuqp4Ji86aabdPnll9d6PnN8fLxOOeWUBn2+ioqK4HnGrVu31l//+tdaXy4+btw4/exnP5Mkfffdd1q6dOlB7//qq6+0YcMGSVLfvn0PGcKSFB4errvuukv9+/dvUDdjGAAAAMBRM3bsWM//6iMpKUmXXnppre9v3bp1cGiWl5dr5syZtT520qRJnp9r+vTpwbevueaaw7adf/75kqTCwsJDBt5nn30m6YcRd+WVV9b6HC1bttR555132M/lJScnJ/jS5/T0dE2ZMqVBz1cXs2fP1t69eyVJl19++WEH/QUXXBB8++uvvz7offt/ryRpypQphwzh/cLCwjx/L+uCl0kDAAAAOC6ceOKJIc/HPdBJJ52kd955R5K0bNmyg4bXfhERERo8eLDn8yxYsEDSD6Nr165dwbFXm6ysrODb69ev15AhQyT9cOGoHTt2SJI6deqkVq1aeT7PiBEjDnnpcH189913wfOOTznllFqvrO2n/b9X0g9HiT///HPPxx94nu/+o8D7LVu2LPj2SSed5Pk8I0aMqE/mIRjDAAAAAI6aGTNm+PZcHTp0qNdj9uzZE/Ixqamphx3V+wdsIBDQTTfdVI9KKT8/P2RDffuPxK5du4Jvd+3atUHPVVf7f68kHXSRsLo48PdK+r/fr8TExODtnmrTrFkzJScnq6CgoF6fcz9eJg0AAADguFCXe9Me+JgDb8tzoLrcn7awsLDuYf/hwCOfBzbUt/9IFBUVBd/ef/700ebX75X0wwW9pLr/PjTk94sjwwAAAACOCwfeW7guj2nIxaji4+NVUFCg5OTkg14GXF8HNtS3/0gceDup/cPyaDtwdL///vvq2bNng56rsLCwzr8PDfn94sgwAAAAgOPCli1bDvuYrVu3Bt9u2bLlEX+u/ef2FhQUHHQ+cH0d2HBgW23q8mv0cuA5yftvC3W0Hfg5D7xP85HIyMiQ9MMR7pycHM/H5ubmHvFLpCXGMAAAAIDjxKJFiw65bdF/+vbbb4NvN+TWO0OHDg2+3ZB7/zZv3lxt27aVJG3cuPGww3rOnDlH/LmkH+4JvP82SrNmzTrs71dtDrwV0/4LctXmwN+rA68GfSQO/P/Zgf+/DKWhv1eMYQAAAADHhYKCAk2dOrXW92dlZQXvDRwTE6NTTz31iD/XgVehfvbZZw97lNLL6aefLkmqqanRyy+/XOvj9u3bpw8//PCIP48kpaWlBe8bvG/fPs/P56U+L+8+5ZRTghe7+uyzz7Ro0aIj+pzSD/ch3u/ll1+udYgHAgG98sorR/x5JMYwAAAAgOPIH//4Ry1cuPCQny8oKNBNN90UPE/2kksuUWpq6hF/nn79+unss8+W9MMVjq+++mpt27bN82OWLFmiRx999JCfv+KKK4JXr37llVf073//+5DHFBUV6eabbz7oAlhH6oYbblBk5A+Xh3riiSc8b9VUWlp6yL1+Jaldu3bBt1esWOH5+eLi4vSrX/1K0g8j9Ze//OVhj9pu375djzzyiLKzsw/6+VNPPVVdunSRJC1fvlyPPPKIampqDnpMIBDQH/7wBy1ZssTzcxwOF9ACAAAAcFw47bTT9M0332jy5Mk6++yzNXz4cMXGxmrt2rWaNm1a8F7AHTp00M0339zgz/fb3/5Wmzdv1ooVK7RixQqNHz9eY8aM0Yknnqj09HTV1NQoOztba9eu1Zw5c7Rjxw5lZmbqzjvvPOh52rdvr1tuuUUPP/ywqqurddNNN+n000/X6NGjlZCQoA
0bNmjatGnatWuXzjjjjAa9LFv64aXGd999t37729+qqqpK999/v9566y2dfvrpyszMVEREhPbs2aPly5frq6++Uu/evTVq1KiDnuPEE09UVFSUKisr9cILL0iSevbsGbxvcWxs7EEvj77sssu0cuVKvfXWW8rNzdWUKVM0ZMgQjRo1Sm3atFFkZKTy8/O1YcMGLVq0KDiwp0yZctDnjYiI0EMPPaQrrrhClZWVevnll7Vw4UKde+65atmypfbs2aOPPvpIy5cv14ABA7Rr165ab6F1OIxhAAAAAMeFPn366Pzzz9fdd9+tDz74QB988MEhj+nSpYuef/75g66qfKQSEhL097//Xb/97W/13nvvqbKyUp988ok++eSTWj/mwItJHWjKlCkqKCjQX//6VwUCAX322WeHnF971lln6eabb27wGJZ+OBqdkpKiBx54QIWFhVq1apVWrVoV8rEHnh+8X7NmzXT11Vfrb3/7m0pKSg65f3Dbtm31xRdfHPRzDz74oDp27Kgnn3xSpaWlWrBggeeVuJs1axYc1wcaOHCgnnrqKd16660qLi7W999/r++///6gx3Tv3l1PPPGEfvKTn9T6/IfDGAYAAABw3Bg/frx69Oihv//97/rmm2+UlZWlyMhIde7cWRMmTNDll18ecmAdqfj4eD388MO6+uqr9e6772r+/Pnavn27CgoKFBUVpbS0NHXq1EmDBg3SKaec4nnRrl/96lc65ZRT9Nprr2nBggXKyclRamqqevTooYsvvlgTJkzQ9u3bfWs/99xzNXr0aE2dOlWzZs3S+vXrlZ+fr/DwcLVo0UI9evTQyJEjgy8H/0+//vWv1bNnT7377rtatWqVcnNzD7kv8H+66qqrdMEFF2jatGmaM2eO1q1bp7y8PElScnKyMjMz1a9fP5188sk66aSTFBUVFfJ5TjvtNP3zn//UCy+8oFmzZmnXrl2Ki4tT+/btNWHCBP34xz9u8D2ZwwKHuzQYgDrpmXrk91MDGpM/XXGydQLghIH3/8o6AXBC2/QBDfr4efPmafLkyZJ+OBf2xhtv9CML4Mgw4JfVeautEwAAAADUEVeTBgAAAAA0OYxhAAAAAECTwxgGAAAAADQ5jGEAAAAAQJPD1aQBAAAAAE0OR4YBAAAAAE0OYxgAAAAA0ORwn2E0Wj1Tex615w51T+E1f3/lqH0+4HiSftb51gmAE6praqwTACe0bJlmnQCExBgGfLL06eesEwAndB90qnUC4ISoSF6AB0iMYbiLMQz4pMWAXtYJgBMS4qKsEwAnREQwhgHAZYxhwCc5azZaJwBOaF3NTQoASSosKbNOAAB4YAwDPul+6YXWCYAT2rZMtk4AnLA7u9A6AQDggTEM+GTVq29aJwBOiBh3gXUC4ITi0grrBACAB8Yw4JP0vt2tEwAncAVd4AftWqZYJwAAPDCGAZ9U/PQ26wTACYUlHA0DJElffGBdADih9fU3WCcAITGGAZ/UPP4b6wTACV3/+3nrBMAJa4acaZ0AAPDAGAZ8ktqti3UC4IQvFm2xTgCccFbXeOsEAIAHxjDgk53fLrBOAJww/JecMgBI0vKsAusEwAmjrAOAWjCGAZ8Eari3KiBJlVXV1gmAE6Iiw60TAAAeGMOAT1K7ZFonAE6IiGAAAJKUFB9tnQAA8MAYBnzS99G/WCcATij59gvrBMAJ61J7WCcATujTo711AhASYxjwyUdzNlonAE44pbTEOgFwQusuidYJAAAPjGHAJ5cPbWWdADghslkf6wTACZXVNdYJAAAPjGHAJ2veeN06AXBC4sWTrRMAJ7ROT7JOAAB4YAwDPln/7kfWCYATBjKGAUnSio17rBMAJwwfxD8MwU2MYcAnnSaMtU4AnNApg296AEnauDPPOgEA4IExDPhk21ffWCcATmhz9Y3WCYAT4mL4NgsAXMb/SgM+iU5MsE4AnFBcWmGdADghPSXOOgEA4IExDPgk8d4/WCcATohe8511Au
CE3Da9rRMAAB4Yw4BPEj9/yzoBcEKrX99qnQA4IWtdlnUCAMADYxjwSdSFP7VOAJywdXe+dQLghNZ71lgnAI7obB0AhMQYBnyy7a5fWScAThj03y9YJwBO2JLezToBcEKmdQBQC8Yw4JO0+/9knQA4Yd22bOsEwAnNF31mnQC4YfAN1gVASIxhwCfZ/3WzdQLghFa/e8I6AXBC6nkTrRMAAB4Yw4BP2owYap0AOGHl1jzrBMAJHSr3WScAbmjXwroACIkxDPhkwwefWCcAThh+2c+sEwAnrM8rsU4AnNDSOgCoBWMY8ElUQpx1AuCE9NQE6wTACRt25FknAAA8MIYBnzTv08M6AXDCN8u2WScATji9fyvrBACAB8Yw4JPd85dYJwBOGPTrNOsEwAmLNudZJwBOOCkt3ToBCIkxDPik3egR1gmAE1o1T7JOAJyQU1BmnQAA8MAYBnyyfeYc6wTACenXFlonAE4IBALWCQAAD4xhwCcdzjzVOgFwQliYdQHghq7tOGUAAFzGGAZ8suWTr6wTACck/eQX1gmAEzbtzLVOAJyQ3qKZdQIQEmMY8EnmuFHWCYATOrbmmx5AkpZvyLJOAAB4YAwDPtn6+dfWCYATUn+aZ50AOCGcUwYAwGmMYQCAr8JZAIAkqYbrZwGA0xjDAABflVdWWScAToiJirBOAAB4YAwDAHwVG81fLYAk7c0rsU4AAHjgOxbAJ+FRfDkBkpSSGGudADhh48486wQAgAe+ewd8EhkbY50AOGHpOq6gC0jSSf3aWycAADwwhgGfzD7/HusEwAk3Fm6yTgCcEAi0s04AAHhgDAM++UnZXOsEwAltTrnBOgFwwrff77BOAJwwemiSdQIQEmMY8MnWz7+yTgCcUHDKhdYJgBP6dGphnQAA8MAYBnwS17yZdQLgBK4mDQAAjgd8xwL4JKVrZ+sEwAnFpRXWCYATUqLDrBMAAB4Yw4BPtnzylXUC4ITul15jnQA4Ye3uQusEwAl9UlKtE4CQGMOATyLjubcqIElpyXHWCYATtmblWycAADwwhgGfpPftaZ0AOGHhmt3WCYATzurd3DoBAOCBMQz4JKFVhnUC4IQTGQCAJOnDZXutEwAnTGzT1joBCIkxDPgka9ES6wTACZ1/kWidADihRQrnDAOAyxjDgE8C1dXWCYATqqtrrBMAAAAOizEM+KTdQ09YJwBOWLY+yzoBcELGsi+tEwA3jOC6KnATYxjwSfnbL1gnAE4Ydett1gmAEz6rOMU6AXBCH+sAoBaMYcAn+5avsE4AnBCI4K8WQJLKKjl9BgBcxncsgE9aDT3BOgFwQ3mZdQHghOjIcOsEAIAHxjDgk6IdO60TACeERUVZJwBOqOJicgDgNMYw4JP8TVusEwAnfL18h3UC4ISxJ3ayTgAAeGAMAz6Jb5FunQA4YVS/ttYJgBOmfb3eOgFwwmXjB1knACExhgGfVBYVWycATvh25S7rBMAJE0Z0tU4AAHhgDAM+KdjKOcOAJHVslmCdADhh1ea91gmAE4YNTLJOAEJiDAM+SWybYZ0AOKFty2TrBMAJm5dts04AAHhgDAM+OeWlV6wTACfs2ldgnQA4YXBckXUCAMADYxjwyco//8E6AXDCvtMmWicATji1GxeTAwCXMYYBn5SfPck6AXBCakSYdQLghM3/+tg6AXBC759da50AhMQYBnyy68YrrRMAJ/R48XXrBMAJ1SefZZ0AAPDAGAYA+CoxLto6AXDCzn2cMwwALmMMAz7JGNzXOgFwQmZLbqEBSNLyjdxaCQBcxhgGfJL13ffWCYATVm7Jtk4AnNCpdYp1AgDAA2MY8EnyY89aJwBOKCiusE4AnJAy95/WCYAbet9kXQCExBgGfFJwO1dKBCSp/Sv/sE4AnFB+2vnWCQAAD4xhwCdhERHWCYATqqprrBMAJ0RFhlsnAAA8MIYBnwSqq60TACdUVPK1AEhSalKsdQIAwANjGADgq1bNuZo0IEnrtn
ExOUCSunVuY50AhMQYBnzS9cIJ1gmAE1ISORoGSFJGWoJ1AgDAA2MY8MnWL2ZZJwBOyB55oXUC4IThfdtZJwAAPDCGAZ98e97d1gmAE+7s0do6AXDCjg+mWScATug5aYp1AhASYxjwyW9+Mtg6AXBCwZIF1gmAE3Z2HW6dADihp3UAUAvGMOCTf0/gnGFAkk54/W3rBMAJsbnF1gkAAA+MYcAn4dxnGJDEBbSA/dZtz7VOAAB4YAwDPml/6snWCYATdmUXWicATji5f3vrBACAB8Yw4JPETL7pASSpe2Zz6wTACYvW7LZOAJwwfBD3n4ebGMOAT6LGnGedADhhx74i6wTACZnFO6wTAEd0sQ4AQmIMAz7Z8sCd1gmAEzre/6h1AuCEitZdrRMAAB4Yw4BPIuO4aBAgSQtWZ1knAE44v3+6dQIAwANjGPBJTUWFdQLghNEDOX8ekKR13FoJkCQNbGddAITGGAZ80qxnD+sEwAmx0fzVAkhSfGyUdQIAwAPfsQA+Wf/eP60TACckXTLFOgFwQk5BqXUCAMADYxjwSVhEhHUC4IScgjLrBMAJLVLjrRMAAB4Yw4BPRr/3nnUC4ISoHO6tCkjS3hourAgALmMMAz6pWLnEOgFwQuzAIdYJgBNa1NRYJwAAPDCGAZ8s/D33VgUk6dS3p1onAE74Ztk26wTACeecmmKdAITEGAZ80mrIIOsEwAkJUWHWCQAAAIfFGAZ8Et8qwzoBcEKgstI6AXBCVGS4dQIAwANjGPBJ1glnWScATijZnGudADih3YqZ1gmAG0b2ti4AQmIMAz5J+ter1gmAE9pff6t1AuCE8rMutE4AAHhgDAM+CY+Msk4AnFBeWWWdADghMS7aOgEA4IExDPik33XXWycAToiIibFOAJyQVcQ/DAGAyxjDgE/Wvf2WdQLghO6XXWadADhh6fq91gmAE9q3a2GdAITEGAZ8Ep3KPfQASaqprrZOAJwQGcHVpAHAZYxhwCc5fUZaJwBOaL5zp3UC4ITeHTOtEwAAHhjDgE+2Xz/FOgFwQvrrb1snAE7IzS6yTgCckNm+pXUCEBJjGADgq7Aw6wLADYFAwDoBAOCBMQz4pNOEsdYJgBM6tki0TgCcsG5brnUCAMADYxjwSVkO3/QAkrRpDy8NBSRpQLcM6wQAgAfGMOCTMK4aCkiSyiu5tyogSc1T4q0TAAAeGMOATwq3brdOAJyQvTPfOgFwQs/2adYJAAAPjGHAJ/2u+4V1AuCExVxBC5Ak7Vm4wDoBcELb0adZJwAhMYYBn8z5f7+1TgCc0Oovz1snAG5o28+6AADggTEM+CT6t09bJwBOSIiLsk4AnJD/r3etEwAntP3ZtdYJQEiMYcAn5fdcZ50AOCH8hdetEwAnBEaOt04AAHhgDAM+mfDpv60TACds3Z1nnQA4odm+zdYJgCPaWwcAITGGAZ+s+NMj1gmAEwbfeY91AuCEb3KLrRMAJ7S2DgBqwRgGfJIxdJh1AuCEXdlF1gmAE07qx9EwAHAZYxjwSUJr/t0TkKTocu4zDEjS7KU51gmAE8aM6GmdAITEGAZ88v3//I91AuCE+Ovutk4AnHBir1bWCQAAD4xhwCctBw+2TgCc0L17hnUC4ITv1mZZJwBOGDYwyToBCIkxDPik+2U/tk4AnPDJ/E3WCYATeuz53joBcMPAztYFQEiMYcAn+ZsZAIAknTq4m3UC4IiO1gEAAA+MYcAns+/gPElAkpr/7inrBMAJ3dqnWScATkhMTrROAEJiDAM+Sfyvv1gnAE4Y3pNzhgFJqiopsU4AAHhgDAM+ybnlausEwAmbXn/bOgFwQk5BqXUC4ITh/BspHMUYBnzScmBv6wTACZt25VknAE4YM7C9dQIAwANjGPBJdDK3DQAkafigDtYJgBPmrtxpnQA4YdQQvkeCmxjDgE+2z5pnnQA4oUVWvnUC4ISIiDDrBACAB8Yw4JPul5xvnQA4oVlSnHUC4ISoyAjrBA
CAB8Yw4JO1U9+3TgCcEHve5dYJgBMKS8qtEwAndMy0LgBCYwwDPklsy6USAUmKjuZoGCBJURV8LQCAyxjDgE+KdmRZJwAAHFJYUmGdAADwwBgGfJLalSvoApKUnVdinQA4oVfHdOsEAIAHxjDgk/R+fawTACf069LSOgFwwsxl260TACeMa9HMOgEIiTEM+GT9e/+0TgDccDYX0AIkqXXzBOsEAIAHxjAAwFdRkeHWCYATKqtqrBMAAB4Yw4BPOp09zjoBcELP9mnWCYATZi7lZdIA4DLGMOCTTR9/bp0AOKH55F9YJwBOiIni1koA4DLGMOCTHi++bp0AOCEjotI6AXBCyx6trRMAAB4Yw4BP1l4z2ToBcELr996zTgCcsHjZVusEwAlnjuxtnQCExBgGfBKorrZOAJxQVFphnQA4IT4myjoBAOCBMQz4pPeUH1snAE5IKciyTgCc8MV2ThkAJGnUEOsCIDTGMOCTvDVrrRMAJ1SNOsc6AXDCj05tZp0AAPDAGAZ8ktiunXUC4IQtWQXWCYATOkaXWScAbmiWbF0AhMQYBnyydur71gmAE0780STrBMAJy/cUWicAThhlHQDUgjEM+CQsgvtJApJUVl5lnQA4ISGOC2gBgMsYw4BPuJo08IOYKP5hCJCknIJS6wQAgAfGMOCTzueeYZ0AOCEzg3PDAEnasjvfOgEA4IExDPhky2czrRMAJxSNm2idADihf9cM6wQAgAfGMOCToX9/0zoBcEKLZvHWCYATAkVcQAsAXMYYBnwy50cXWicATuj18hvWCYATqqsD1gmAEwbyIgk4ijEM+IQBAPwgKT7GOgFwQtSmldYJgCM6WAcAITGGAZ9Uf/yWdQLghFaTJ1snAE74pDrdOgFwQlvrAKAWjGHAJ2unvm+dADgh+twfWycATmibnmidAADwwBgGfJKc2cY6AQDgkOQEThkAAJcxhgGfFO/Jtk4AnJAYF22dADiB+wwDP+jexboACI0xDPikuqLCOgFwQk5BqXUC4IS05FjrBACAB8Yw4JOwsHDrBMAJ8bFR1gmAE/KLyqwTAAAeGMOATwLV1dYJgBPCwqwLADdUVtdYJwAAPDCGAZ9kjhtlnQA4oVXzJOsEwAn5ReXWCQAAD4xhwCdbP//aOgFwQpufc84wIElFpVxLAgBcxhgGfNJiQC/rBMAJq7bss04AnDBmYKZ1AgDAA2MY8MnepausEwAnDL23uXUC4IRvV+60TgCcMHpod+sEICTGMOCTiBiuoAtI0r68EusEwAmtmydaJwAAPDCGAZ/UVHHVUECS0lPjrRMAJ2zLKrBOAJzQvYt1ARAaYxjwSeezx1knAAAc0qMDpwwAgMsYw4BfwsOtCwAntAjnCrqAJL2/kHOGAUm6fEKadQIQEmMY8En++g3WCYATdlfyVwsgSSf2yLBOAAB44DsWwCf7vl9rnQA4oV1NwDoBcEJOAffcBgCXMYYBAL6KCA+zTgCcUFXNhRUBwGWMYTRaf7ri5GP6+arue+qYfj7AVZWz/mWdADihquex/XsIAFA/jGHAJ9v28XI4QJJOzmhlnQA4oUdmunUCAMADYxjwyQlduVIiIEllG9dZJwBOWLtxj3UC4ITWrbnNGNzEGAZ88tiHO6wTACdMu/5U6wTACUmbN1snAI7oZR0AhMQYBnxSxXVSAElS4bZt1gmAE5I7drROAAB4YAwDPslsxhV0AUkq2Mg9twFJUieOhgGSlGgdANSCMQz4ZNLoNtYJgBPCNnHKACBJKzbttU4AnMA5w3AVYxjwyWdLuFAKIEmTYsqsEwAntG6eYJ0AAPDAGAZ8EhsVbp0AOCEqgQEASJKiIqwLAAAeGMOAT8I5ZRiQJFXk5VsnAE4oKeRVEgDgMsYw4JOagHUB4IaE9u2tEwAnZLRpZp0AAPDAGAZ8UlhWbZ0AOKFo6xbrBMAJ+R37WScATkhvwT8MwU2MYcAnXVvFWScATkjL6GudADghukWydQIAwA
NjGPDJV6sKrRMAJ4zTPusEwAmJ/QZbJwAAPDCGAZ9cOIR76AGSFFFRYJ0AOGHjjhzrBMAJA1OTrBOAkBjDgE82ZRVbJwBO6Fu22zoBcEJ5JteSAACXMYYBnwzvmW6dADghIZurSQOSVMIt9wDAaYxhwCdvz95pnQA44ZeJ26wTACckdRtknQAA8MAYBnwytCtXDQUkKbaKV0kAklRYUWWdAADwwBgGfPL59/nWCYATxo3gfpKAJGV25MKKAOAyxjDgk4cmc29VQJIid26wTgCcsGJztnUC4ISBfRKtE4CQGMOAT3IKSq0TACck79plnQA4ISeDV0kAgMsYw4BPvlrKAAAk6YzSfdYJgBPiO0RZJwAAPDCGAZ8kx/PlBEhSYosO1gmAExLTk6wTAAAe+O4d8Enz5BjrBMAJJdt2WCcATtiewD8MAZKU2b6ldQIQEmMY8MmyTVxNGpCk7hEB6wTACS1S46wTAAAeGMOAT2Kjw60TACcEKqqtEwAnVFTytQAALmMMAz4ZuXK6dQLghK533GWdADjhgwXbrBMAJwyxDgBqwRgGfDKj2/nWCYATmv/rY+sEwAm9TzzNOgEA4IExDPjk1OVvWycATuj0+4esEwAnzFi0xToBcELv7tYFQGiMYcAnn/X+kXUC4IS0f7xunQA4oeWwM6wTAAAeGMOAT7q2SrBOAJwQkc9txgBJKimrtE4AAHhgDAM+mbWKWysBkjS8Q5R1AuCEru3SrBMAAB4Yw4BPmiVwayVAksIiIqwTACdU13DPbQBwGWMY8Em/zCTrBMAJFXvXWicATtixPcc6AXBC2zbp1glASIxhwCcREWHWCYATIqKjrRMAJ4SH8fcCALiMMQz4pF/n5tYJgBPaj+LK6oAktd67yzoBAOCBMQz45A/vrrdOAJxwb9vPrRMAJ6Sccb51AuCEZOsAoBaMYcAnGclcNAiQpKrSUusEwAl7cousEwAntG7Nq+fgJsYw4JO0BL6cAEkKq+IfhgBJqqistk4AAHjgu3fAJ3sKK60TACeEcZsxQJKUFB9jnQAA8MAYBnxy9gkZ1gmAEyK3bLNOAJywfV+hdQLghJ7drAuA0BjDgE9253CeJCBJbcvKrBMAJ4SHc2slAHAZYxjwyaJNBdYJgBOGtYi3TgCc0KVtmnUCAMADYxjwyXlDWlknAG7YxMukAUlaszXbOgFwQvt2LawTgJAYw4BP3v5ml3UC4ISbW9VYJwBO6JHJ7WQAwGWMYcAn94/kqqGAJMU2G2qdADhh3ua91gmAEzgyDFcxhgGf/G0595MEJGlixGzrBMAJrYafYZ0AAPDAGAZ8clHxAusEwAntL7jAOgFwwocrc6wTACcM6N3BOgEIiTEM+KS6nNvJAJIUHhllnQA4gTsrAYDbGMOAT1b0Pss6AXBCizWrrBMAJwzvM8A6AQDggTEM+CQ6Ktw6AXBC4ZYt1gmAE3bHZ1onAE7omJlhnQCExBgGfNIqLd46AXBCYng76wTACckJ3GUAAFzGGAZ8Ul3NvVUBSYpOTrFOAJyQnso/kgKAyxjDgE/+uSjLOgFwQpukDdYJgBPCW3S0TgAAeGAMAz5JiOGcYUCSqisqrBMAJ5SU8rUAAC5jDAM+GdWnhXUC4ISozQnWCYATdhVwyz0AcBljGPBJs1nTrBMAJ3S49ufWCYATVi3ZZZ0AAPDAGAZ88lH6aOsEwAmxU6daJwBOaDuC+88DgMsYw4BPWiRHWScATggrjLBOAJxQVlFlnQAA8MAYBnwyYP5b1gmAE7o98KB1AuCETxZstk4AAHhgDAM++brfxdYJgBOavfmGdQLghFZDTrdOAAB4YAwDPlmylauGApL0o14p1gmAE9plplsnAAA8MIYBn/RvF2udADihqrTUOgFwwpbdudYJgBP6pSZZJwAhMYYBn7RJYwwDkhQfl2GdADihKi7aOgEA4IExDPgkMY4vJ0CSSrKyrBMAJ+xOybdOAJzQuUMr6wQgJL57B3zSrV
2qdQLghOYt+1snAE6ISkm2TgAAeGAMAz5ZtiHbOgFwQnzpWusEwAl5PVKtEwAndLEOAGrBGAZ8cumYntYJgBNylpZYJwBO2FFWaZ0AAPDAGAZ88thbi60TACdckcEVdAFJ6jG4l3UCAMADYxjwSd8O3DYAkKSKrI3WCYATtmzj9BlAklq3bm6dAITEGAZ8UhOwLgDcEHny6dYJgBPaLfzaOgFww9Du1gVASIxhwCczV+RYJwBOOKFqu3UC4IRmY8+yTgAAeGAMAz4Z3SfNOgFwQtnuLdYJgBMWr91lnQA4YfRQTiWDmxjDgE/eX7DPOgFwQt+O/NUCSFKfTi2sEwAAHviOBfBJany4dQLghIjYGOsEAACAw2IMAz4Z1JGXAAGSVJG7wToBcMKqzbxiCJCkUS2aWScAITGGAZ9s2lNinQA4YYgqrRMAJyTERVknAAA8MIYBn4zoyT30AElKKGhrnQA4oZR77gGA0xjDgE/mrM62TgCc0F47rBMAJ4R36GedAADwwBgGfJJTVG2dALgh0ToAcEN8DC+TBgCXMYYBn6QlRlgnAAAcUlLO+fMA4DLGMOCT7Tl80wNIktKsAwA3pCRwmzEAcBljGPBJl5Z80wNIUsLYC60TACcUf/KedQLghmt+YV0AhMQYBnwyb1OZdQLghImLvrZOAJzQ5fJJ1gkAAA+MYcAno7rHWycATqgq3m6dADhh9eZ91gmAEwb2SbBOAEJiDAM++WZdiXUC4ISbbr/YOgFwwrbPP7VOANzQp4N1ARASYxjwyS/PbGudADhh88cfWCcATtjR8QTrBMAJna0DgFowhgGf/PdnO6wTACc8PjLdOgFwwsl9+EdSAHAZYxjwSXREmHUC4ITi7ZwzDEhSdp9y6wTACa0SE60TgJAYw4BPuraMsk4AnFBdUWGdADhhd3aRdQLghFatmlsnACExhgGfxMeEWycATkjMyLROAJwQ05yjYQDgMsYw4JMlW7nPMCBJE5vx0lBAklISY60TAAAeGMOAT/q345seQJJKstZZJwBO+H5DlnUC4IQhA7ieNNzEGAZ8Es6rpAEAB6gJWBcAALwwhgGfJMfx5QRIUoSirRMAJwQCrGEAcBnfvQM+6dcpzToBcEJyeVfrBMAJ4clx1gkAAA+MYcAnM5fvsU4AnNC5S7V1AuCEdi1TrBMAAB4Yw4BPEmMjrBMAJ+StXWOdADhhS1J76wTACScN5hVDcBNjGPBJfkmVdQLghOb9+lsnAE7o1rONdQIAwANjGPBJNZcNBSRJOSu+t04AnJCV3NY6AXDCgN4drBOAkBjDgE+KymusEwAnBGL5WgAkKTKCe+4BgMsYw4BPBndOsk4AnBC+N8o6AXBCVl6JdQIAwANjGPDJdxsLrRMAJwyKrrBOAJzQIjXeOgEA4IExDPikvJJzhgFJan/26dYJgBNqqjkyDAAuYwwDPunZJs46AXBCdEZr6wTACfsWzLNOAJyQ2q2HdQIQEmMY8ElecaV1AuCEDX9/1ToBcELpiLOsEwAntLMOAGrBGAZ8MrBzqnUC4IT4wlbWCYATSq0DAACeGMOAT3Zkc24YIEm9I6qtEwAnxMdyZXUAcBljGPBJXHSEdQLghPJ9udYJgBP25BZbJwBO6G0dANSCMQz45PPv860TACcM4VXSgCSpW/vm1gkAAA+MYcAnvx3AEQBAktL7n2GdADhh9oY91gmAE9q2SbdOAEJiDAM+eXZbinUC4ITfnMB5koAkDW1eY50AAPDAGAZ8Us33PIAkaeP096wTACdUjTrHOgFwQpp1AFALxjDgk5Jy1jAgSWHx4dYJgBOio7iwIgC4jDEM+KRrq1jrBMANRWHWBYATcgq40zAAuIwxDPjkmtMyrRMAJ1Tkc/48IEn5xfzDEAC4jDEM+OSZzzZbJwBOOP+kjtYJgBMSv5lqnQC4YfBN1gVASIxhwCeXj+1unQA4oWLRN9YJgBM2DjzdOgFwQn/rAKAWjGHAJzF7t1knAE
6I7djROgFwQk1OwDoBAOCBMQz45IWlXCgFkKSL4/mHIUCSug4aZZ0AAPDAGAZ8kpESY50AOKEiu8A6AXDC3uwi6wTACd27WBcAoTGGAZ9cdlI76wTACSV7+YchQJKyuZo0ADiNMQz45LkZm60TACeMKVxsnQA4IWzQWOsEAIAHxjDgk+055dYJgBuirAMANyTFR1snAAA8MIYBn9zYu8o6AXBCcuY46wTACfN2lVgnAAA8MIYBn9z9rXUB4Ib/2rfIOgFwwpCx460TAAAeGMOAT64v/Ld1AuCEmOG3WycATsgp4MgwIEmpzZKtE4CQGMOAT/I3bLZOAJwwqFm8dQLghM8XbLJOAJzQuUMr6wQgJMYw4JMX+/7cOgFwQucPplsnAE44+cxzrBMAAB4Yw4BPJgxItU4AnBAdyUtDAUnKyi6yTgCc0CwtxToBCIkxDPikZWqsdQLghIqdBdYJgBN25zCGAUnqaR0A1IIxDPikd6d06wTACTW5cdYJgBPKKqqtEwAAHhjDgE+mz+ZCKYAknVWZZ50AOCG9M/8wBAAuYwwDPgkLsy4A3BAWEWGdADghPJy/GADAZYxhwCffrCu2TgCcMPHM7tYJgBPap0dbJwAAPDCGAZ/87ie9rBMAJ5Qsn2udADhheVWqdQLghNNbtbFOAEJiDAM+qakJWCcAToiM5zxJQJLiYvg2CwBcxv9KAz55/pP11gmAE66M2WKdADihWYe+1gkAAA+MYcAnu/O5hQYgSWGtuYAWIEkpSbxKAgBcxhgGfNIymQEASFKgsso6AXBCTkGJdQLghLacMgxHMYYBn8RGcQsNQJLE7WQASVJ1NdeSAACXMYYBn3TOiLdOAJwQXZlqnQA4oTzAGAYAlzGGAZ8UlfLSUECSKksLrBMAJ5RV8PcCALiMMQz4ZO6GYusEwAlnZfJXCyBJXdulWScAADzwHQvgkxvGZ1onAE4IX7fbOgFwwuK1fC0AknRmS/5hCG5iDAM+iYwIt04AnBCVmGidADghPibKOgEA4IExDPjk3W+3WycATvhp9C7rBMAJqd0GWScAADwwhgGflFZw1VBAktqPH2edADghUFNunQAA8MAYBnzSpz23VgIkafuXX1gnAE7IGXCadQLghJHWAUAtGMOAT/YVVFonAE4Ij462TgCckJIYY50AAPDAGAZ8EhlhXQC4ITyCLwZAksLDwqwTAAAeGMOAT9qmxVknAE4IL+fIMCBJpeVV1gkAAA+MYcAnHVslWScATmgW19s6AXBCcssU6wQAgAfGMOCTsgqOAACSVFLArZUAScqLZAwDktSyZZp1AhASYxjwyXtzd1snAE64sfk26wTACQld+1snAAA8MIYBnyTFcdEgQJKiUzgaBkhSXHSUdQIAwANjGPDJnE3cWgmQpNuGd7ROAJwQkcspA4AkqXVz6wIgJMYw4JNP/mu4dQLghLxli60TACesrEq2TgCcMNo6AKgFYxjwyXm/m2udADjhxdOrrRMAJwwfxEWDAMBljGHAJxefyK2VAEkq3LzEOgFwwpaUTtYJgBNGpqVbJwAhMYYBnwzt2dI6AXBCzJpm1gmAE0rLueUeALiMMQz45KmPNlonAE64vWWedQLghB5DuWgQALiMMQz4pG0zbqEBSFJkXLx1AuCE8kqODAOAyxjDgE/aNY+xTgCcUJVfbJ0AOCE7m68FQJK6dbYuAEJjDAM++WJloXUC4IQJw9tbJwBO6NanjXUCAMADYxjwyUVDuIUGIElF2xdaJwBO+HrpNusEwAljRvS0TgBCYgwDPpm+MMc6AXDCRTdcbJ0AOCFxwTzrBMARjGG4iTEM+GT8gFTrBMAJexZ/Z50AOCGs94nWCQAAD4xhwCfrdpVYJwBOGBK1yToBcEJ1m+7WCYATuJIEXMUYBnwSFRFmnQA4Ib5Va+sEwAnxaYnWCQAAD4xhwCej+qRbJwBOCMsvsE4AnLB1d751AuCEli25yCjcxBgGfNK5bTPrBMAJFd
sYw4Ak7Ygusk4AnMDZ83AVYxjwyUNvrbROAJzwzC/PtU4AnBD15RfWCYAj+lkHACExhgGflFQErBMAJxRsWG+dADiheV8GAAC4jDEM+KRjepR1AuCEPYsWWicATqgZcYZ1AuCEPtYBQC0Yw4BPBnZKtk4AnBC2iyurA5K0N49b7gGAyxjDgE/25JVZJwBOqKmutk4AnBAZEW6dAADwwBgGfFJSUWOdADghwBgGJDGGAcB1jGHAJ3M38HI4QJKuvGiEdQLghHZx/CMpALiMMQz45L60760TACckd7rKOgFwwjcrdlonAE44tat1ARAaYxjwycwWw60TACe0/H65dQLghBN6cWslAHAZYxjwyeg931onAE5IPnWidQLghM8WbbZOAJxw/hj+YQhuYgwDPnk75kTrBMAJl3/8kXUC4ITOJ59lnQAA8MAYBnxy36QTrBMAJ+yaVWidADhh8Y486wTACf16ZlonACExhgGfXPzwHOsEwAl/7ldgnQA44YzxXawTAAAeGMOAT07pHmedADihetAo6wTACetfft46AXBC/xtvsk4AQmIMAwB8Vf3Vh9YJgBNKR51rnQAA8MAYBnySU1RlnQC4ITxgXQA4ISYqwjoBAOCBMQz4JCzMugBwQ1gUf7UAkhQVGW6dAADwwHcsgE+qa6wLADcEqqutEwAnVPEXAwA4jTEM+CQplpfDAZIUFuBoGCBJlVWMYQBwGWMY8El1DedJApIkThkAJEmREfzDEAC4jDEM+GToJ3+xTgCckPbU/1gnAE7YuDPXOgEA4IExDPhk/pm/tk4AnNDh42nWCYAT4oefaZ0AAPDAGAZ80q1NgnUC4IbdvE4akKS8ojLrBACAB8Yw4JOvVuRZJwBOuOSXP7FOAJwQ9/kn1gmAI3pYBwAhMYYBnyTGcqEUQJLWvfm6dQLghpHjrQsAAB4Yw4BPJu751DoBcEK3O+60TgCc8MmirdYJgBN6d29nnQCExBgGfDK9DRdKASQp8c03rBMAJ3QYNcE6AQDggTEM+CQ+mpdJA5IUGRNnnQA4oTqcvxcAwGWMYcAnSXF8OQGSFD78dOsEwAlVn71vnQC4oft11gVASHz3Dvhk+PfTrRMAJ3Q8717rBMAJn/QYZZ0AOKGPdQBQC8Yw4JMF/S+0TgCckPz2m9YJgBPSB42xTgAAeGAMAz6Zt6HYOgFwwjn90q0TACdkdsmwTgAAeGAMAz75rx/3tE4AnFA570vrBMAJMxZusk4AnHD+mH7WCUBIjGHAJzO+226dADhhdHmZdQLghBapXFkdAFzGGAZ8klNYaZ0AuCHMOgBwQ3xMlHUCAMADYxjwyVlD2lonAE5oG895koAkVaemWicAADwwhgGfvPTZZusEwAk3ZmyzTgCcEDvqTOsEwAnN0qwLgNAYw4BPMtOjrRMAJ4RFRFgnAE4oKeP0GQBwGWMY8MmybVw0CJCkK07NtE4AnNA8nQtoAYDLGMOAT07pmWSdADghd9Vi6wTACeujOH8ekKRRQ3idNNzEGAZ8khDLlxMgSW1OGW2dADihW1surAgALuO7d8Anq7cXWScATjghZ5l1AuCEwLBE6wTACT27xVsnACExhgGf5BZXWycATmh+4XnWCYATst590zoBcEO366wLgJAYw4BPzhyUbp0AOKFwzlfWCYATSoZzayUAcBljGPBJIGBdALihoqDAOgFwQmFJhXUCAMADYxjwyeuzs6wTACc82CXcOgFwwoBurawTAAAeGMOAT3YXWhcAbkjp1t06AXBCZN5e6wTADemp1gVASIxhwCdDO/DlBEjSvu++s04AnFA1aoJ1AuCEAdYBQC347h3wycm9uKE8IEkxOXwtAJJUWF5lnQAA8MAYBnwyd02udQLghK6RfC0AkhQXw7dZAOAy/lca8Mmo3s2tEwAnpFR3tU4AnFAUxbdZAOAy/lca8MmevDLrBMAJRQXbrRMAJ+SkdrROAAB4YAwDAHxVU1lpnQA4obKqxjoBAO
CBMQz4pGdmqnUC4ISYmmbWCYATqqsZwwDgMsYw4JO//XuLdQLghPu6B6wTACcM7tnGOgEA4IExDPhkwiBuJwNIUuXerdYJgBOWrtttnQA4YfTQZOsEICTGMOCTqMhw6wTACZFxcdYJgBNioiKsEwAAHhjDgE9mr+LeqoAk9YjOsU4AnJDQN8o6AQDggTEM+GRkLy4aBEhSbGEL6wTACTnlVdYJAAAPjGHAJ3+fvdc6AXBCv94MAECSenTgH4YAwGWMYcAnT107wDoBcEJhcbl1AuCEgiULrBMAJySfOsY6AQiJMQz45H8+WmWdADhhYuB76wTACXtOHm+dADihnXUAUAvGMOCTfh24bQAgScl9fmSdADihcuHX1gmAG/p2tC4AQmIMAz5Zua3QOgFwQu/N71gnAE4oHXGWdQLghM7WAUAtGMOAT+JjuM8wIEndz/uxdQLghIKtW6wTAAAeGMOAT5Li+HICJClr0ULrBMAJkb0HWScAADzw3Tvgk9FFy60TACekDhtnnQA44fMVO6wTACdc0Kq5dQIQEmMY8MlT2ZwRA0jSHcuWWScAThhz8qnWCQAAD4xhwCen9U2zTgCcUF2SbZ0AOGHt1n3WCYATTkxNsk4AQmIMAz45vXOsdQLghOryHtYJgBO2FJZbJwAAPDCGAZ9MX1lknQA4YWj2d9YJgBMi+o62TgAAeGAMAz6ZMKKTdQLghIrvc60TACfsCQSsEwAAHhjDgE9e+WS1dQLghIkZldYJgBN6dki3TgAAeGAMAz7ZkVthnQA4obh6p3UC4ISyLpwzDEhSK+sAoBaMYcAnZ5+QYZ0AOCF80xbrBMAJW7MKrBMAJ3Tv0sY6AQiJMQz45JWvdlknAE54ZERb6wTACaP7t7NOAAB4YAwDPrlkRAvrBMAJRdsWWCcATlgbxxgGJGnMiJ7WCUBIjGHAJ3vyyqwTACcMOO1s6wTACW1n/ds6AXADYxiOYgwDPunZPtU6AXBCzPZ11gmAEypPOt06AQDggTEM+GTanB3WCYAT7h/f2joBcEJydLV1AgDAA2MY8Mllez6xTgCcEN/2/1knAE74dvk26wTACady+jwcxRgGfPJR+wnWCYATkl57xToBcELyyeOtEwAAHhjDgE/WZlVYJwBOiB/YyjoBcEKXTtxlAABcxhgGfJKZxpcTIEml+/ZZJwBO2Lgj1zoBcELv7gnWCUBIfPcO+CQ2Ktw6AXBCoLLKOgFwQnkFXwsA4DLGMOCTywLLrBMAJ3Q67wLrBMAJn63OsU4AnDDYOgCoBWMY8MndO7ihPCBJf1u7xjoBcML4QQOtEwAAHhjDgE/6tomyTgCcULBhtXUC4ITcVl2tEwAn9GuWZp0AhMQYBnyyJqvSOgFwQvsrz7BOAJxQvHuHdQLgiEzrACAkxjDgk/H9k60TACdsm/GZdQLghOx+p1onAE7ghntwFWMY8En/pe9bJwBO6HbLbdYJgBM2fLfdOgEA4IExDPhk72mXWScATtjwwXTrBMAJCV2HWycAADwwhgGftJn/kXUC4ITMKyZZJwBOWLpsr3UCAMADYxjwyYIuY6wTACd0Li6yTgCccNEoriYNAC5jDAM++Xo1AwCQpFMKl1snAE5IPuN86wTACW0TEq0TgJAYw4BPqmoC1gmAExLatLFOAJzQMjXOOgEA4IExDPjk+r3vWScATqjsdq91AuCEXdnF1gmAEzLjE6wTgJAYw4BPns24yDoBcMJ/LZhpnQA4IWnceOsEAIAHxjDgkxi+mgBJUlh4mHUC4ITq6hrrBACAB759B3xySq8U6wTACZU5m6wTACes2MStlQBJGt2imXUCEBJjGPBJ+xacDwNIUlQFVw0FAADuYwwDPqmo4uVwgCRVl5VZJwBO4GXSAOA2xjDgk/U7C60TACdkVlRYJwBOiOViEgDgNP5XGvBJblGldQLghEBVlXUC4ISoiHDrBACAB8Yw4JNPVpVbJwBO+MXlQ6wTACdERJVYJwAAPD
CGAZ90ac7tZABJ2rtksXUC4ISkMy6wTgCckGYdANSCMQz45KKZj1knAE7IfHuqdQLghC8XbbZOAJzQvl0L6wQgJMYw4JPUB5+wTgCcsOODadYJgBOiOnDKAAC4jDEM+OSJf26zTgCc8Ie+sdYJgBNGDsi0TgAAeGAMAz4Z0C7GOgFwQg23VgIkSduz8q0TACf0TE60TgBCYgwDPimpqLFOAJwQiOVrAZCkiAgurAgALmMMAz5pkRxlnQA4oYb7DAOSpILCMusEAIAHxjDgk5yiSusEwAkZpwyzTgCc0KllknUCAMADYxjwSUxkuHUC4IR9S5daJwBuGHqadQHghO7p1gVAaIxhwCc7cjkyDEhSZXWBdQLghGhOGQYApzGGAZ+0SuHLCZCk8Oho6wTACWUVnD8PAC7ju3fAJ2mJXEALkKTIiDjrBMAJTGEAcBtjGPDJGWXLrRMAJ3S8ZKJ1AuCEuauzrBMAAB4Yw4BPrl/d2ToBcML03busEwAndKvJsU4AHNHNOgAIiTEM+KR7C64mDUjSjq++tE4AnBA99jzrBMAJrawDgFowhgGfZBXWWCcATojtyj00AElq0zLFOgEA4IExDPikXWqEdQLghLI9e60TACds2plrnQA4oXf3BOsEICTGMOCTyuqAdQLghE7nn2+dADihcNs26wTAEe2sA4CQGMOAT07ukWydADghZ+VK6wTACYHuA6wTAAAeGMOAT8oqq60TACcUbNponQA4ITupg3UC4IT27VpYJwAhMYYBnwzo3Nw6AXBCQhYvhwMkqTCKa0kAgMsYw4BP3v12u3UC4IQrE7jPMCBJKV14mTQAuIwxDPjk4pPbWycATohcyxgGJGnzngLrBMAJPbu1tU4AQmIMAz75atlu6wTACeeUcjsZQJKSu8dYJwAAPDCGAZ9Uc2slQJIUl5FhnQA4ISGNe6sCgMsYw4BPCsu4mjQgSWU52dYJgBMCpRXWCQAAD4xhwCetUqOtEwAnpIy+0DoBcELxV/+yTgDc0GOSdQEQEmMY8Mk15/SzTgCcsOvTf1onAE5YnMbfC4AkdbcOAGrBGAZ8csv/zLdOAJxwY9I+6wTACQP6n2SdAADwwBgGfNK2WZR1AuCE1K49rRMAJzRrlWqdAADwwBgGfFJSUWOdADghb/Vq6wTACXvTO1snAE4Y2Icrq8NNjGHAJ2MHtLROAJwQsWGjdQLghJ15JdYJAAAPjGHAJ//4eqd1AuCE21vxKglAknpkNrdOAAB4YAwDPqkJWBcAjggPty4AnBARwdcCALiMMQz4ZHseaxiQpLRxva0TACfElORaJwCOSLcOAEJiDAM++fOUbtYJgBOK1y22TgCc8H1lsnUC4IQxXa0LgNAYw4BPHp223joBcMIDvSutEwAnDO3TzjoBAOCBMQz4pEtL7jMMSFJqV14lAUhSTKDKOgEA4IExDPhkVx7f9ACSlLNyhXUC4ITKFhwZBiSpVYp1ARAaYxjwyen906wTACdU795knQA4YeWmvdYJgBNateI2Y3ATYxjwSU2Aq0kDkpR0+nnWCYATOi382joBcERP6wAgJMYw4JMVWwutEwAnDCmbZZ0AOCFm6CnWCQAAD4xhwCdVNRwZBiSporDAOgFwQkVxuXUCAMADYxjwyZj+La0TACdE7cqyTgCcsDOvxDoBcAIvkoarGMOAT+auzrZOAJzQNpwjw4AkJSdEWycAADwwhgGf3HJWR+sEwAkFW8OsEwAnLNzDtSQASRrQ27oACI0xDPjkt+9vsE4AnHBFOPcZBiSp3cgJ1gkAAA+MYcAn4+a/YJ0AOKHbM3+zTgCc8On8jdYJgBMGWgcAtWAMAz6ZffI11gmAEzJefck6AXBCxojx1gkAAA+MYcAnxeXV1gmAGyKsAwA3REXyxQAALmMMAz5JjefLCZCk8ABX0AUkqSbA/ecBwGV89w74ZM7GMusEwAlTzuhunQA4IS0jzjoBAOCBMQz45KGfdLNOAJxQvm6xdQ
LghG/X51onAE4Y0yLDOgEIiTEM+OT9b7dZJwBOuKIV50kCktS3S0vrBACAB8Yw4JMA54YBkqTibfzDECBJe9vmWScATmjZMs06AQiJMQz4JLMF54YBkhSewwW0AEkqKa+yTgAAeGAMAz7p2CrJOgFwQlRFonUC4AReMQQAbmMMAz7pG19unQA4oapDR+sEwAn5hRXWCQAAD4xhwCdXT91jnQA44Q999lonAE4Ye9Y51gkAAA+MYcAnPVtFWScATijPybZOAJywfhtfC4AkDezD6TNwE2MY8ElYmHUB4IaMocOsEwAnpHbgCroA4DLGMOCTuKhw6wTACXsWLLBOAJywOYIxDEjSkAFcZBRuYgwDPunSOsE6AXBCRDa3GQMkqayi2joBAOCBMQz4pLC00joBcEJ0UrJ1AuCE5in8wxAAuIwxDPgknJOGAUlSWfY+6wTACXvzSqwTAAAeGMOAT8oqaqwTACeER0dbJwBOiI/h2ywAcBn/Kw34pEUKAwCQpMhiXhoKAADcxxgGfLJxNy+HAyRpZIfm1gmAE7plplsnAAA8MIYBn9QErAsAN+SvW2edADhhV/PO1gmAE4Y048KKcBNjGPBJIMAaBiQpLDLCOgFwQkQE958HAJcxhgGfDO2RZp0AOCF6d4p1AuCEbG65BwBOYwwDPtmdU2qdADihQylfC4Akccc9AHAbYxjwyda9DABAkobHsAAASUpL5srqAOAyxjDgk6/WllsnAE6YfFoL6wTACW0yEq0TAAAeGMOAT64byy00AEkq27PEOgFwwrdr9lgnAE4YPTTVOgEIiTEM+CSvmAulAJJUXc6rJAAAgPsYw4BPOvJyOECSlJjQ3joBcEJlYox1AgDAA2MY8MnCddnWCYATuqZXWScATkhPTbBOAAB4YAwDPtmVx8ukAUkqDXCeJCBJlWUV1gkAAA+MYcAnPx3b0ToBcEL0mr3WCYATNu7Kt04AnNCtcxvrBCAkxjDgk/jFX1knAE5IHXSCdQLghN2buP88ALiMMQz4ZGZ8b+sEwAmjFy2wTgCc0K3/aOsEAIAHxjDgk0Ubi60TACeMSuMCWoAktUiNt04AAHhgDAM+Of+bJ6wTACdkvvp36wTACbOXbrVOAJzQpVNr6wQgJMYw4JPS6x+wTgCcULZpnXUC4IST+nW1TgAAeGAMAz5ZvqXAOgFwQu9tq6wTACdURKZZJwBOGJiaZJ0AhMQYBnwSFx1unQA4IZy/WgAAwHGA71gAn0SEh1knAE6oLquwTgCcUF5ZbZ0AAPDAGEajNfD+Xx3TzxcdyZFhQJLKR5xlnQA4IeaTqdYJgBsG3m5dAITEGAZ8smJ7qXUC4ITrzm9pnQA4YVvnTtYJAAAPjGHAJ5nNo60TACdsy8q3TgCcULJrt3UCAMADYxjwSfv0WOsEwAlZucXWCYAT4kt5xRAAuIwxDPjkzbl51gmAE/528k7rBMAJba+73joBAOCBMQz45OwBidYJgBP2tuM8SUCScv/6tHUC4IRBt91pnQCExBgGfBIZwa2VAAD/JyyMuwwAgMsYw4BPvllbZJ0AOOGqCX2tEwAnbFnd1joBAOCBMQz4pE0qX06AJBWWVFgnAE6oruBrAQBcxnfvgE8qqwPWCYATKiqrrRMAJ1QVc2V1AHAZYxjwSYf0GOsEwAl7cjllAJAklZVbFwAAPDCGAZ+0SGEMA5JUXcOrJABJioqOtk4AAHhgDAM++cecHOsEwAl/785txgBJqhg+3DoBAOCBMQz4ZHw/BgAgSWuLI6wTACdEffmldQLghBaDT7ROAEJiDAM+iY5iAAASL5MG9ouyDgAAeGIMAz4pKq2yTgCcEBvNXy0AAMB9fMcC+KSyusY6AXBCRESYdQLgBG4yBgBuYwwDPtmeU2GdADghI43z5wFJ2pvI1wIAuIwxDPgkjINhgCRp34fvWCcATogYc551AgDAA2MY8El6IpdKASQpcvQ51gmAE8r+OdU6AXBDt5usC4CQGM
OATzbsKbdOAJzQPiPFOgFwwtZmqdYJAAAPjGHAJ33axVknAE5Yvz3bOgFwQvXevdYJAAAPjGHAJ0lx3GcYkKSSMm4zBkhSdDXXkwYAlzGGAZ/ERjGGAUmqruE2Y4AkhYWFWycAADwwhgGfFJRwNAyQpHYtkq0TACeUtGxhnQAA8MAYBnyyZEuxdQLghJ+exfnzgCQVVVZaJwAAPDCGAZ+M7dfMOgFwwpotXDQIkKSY4hLrBACAB8Yw4JO9+dxaCZCk7u3CrBMAJ1SXl1knAAA8MIYBnyTE8uUESFJ1TcA6AXBCJBfQAgCn8d074JPlW4usEwAnnHdyZ+sEwAl7ExOtEwAAHhjDgE96tY23TgCcsGlnnnUC4ITowgLrBACAB8Yw4JPt2ZwzDEjSWUMTrBMAJxTGcWV1AHAZYxjwyQUj2lknAE5ITWIAAJKUdMpo6wQAgAfGMOCTr5bttk4AnBATHWGdADih+tNPrRMAJzTvN8A6AQiJMQz4ZGduhXUC4IQWqbxMGpCkbC6gBQBOYwwDPqkJcDsZQJKKZn9unQA4Ie28S60TAAAeGMOAT7q34jxJQJJyuw2xTgCcUPTGi9YJgBNa33q7dQIQEmMY8MnuPF4mDUhSpzbNrBMAJ2RntLROAAB4YAwDPtlXWGWdADghNibKOgEAAOCwGMOAT/p34KJBgCSt2rTHOgFwQlQWXwsA4DLGMOCT6Mhw6wTACdU1XEwOkCReIwEAbmMMAz75enWBdQLghMuGtrZOAJxQeMop1gkAAA+MYcAnl4zIsE4AnLBoR4l1AuCElLlzrRMAJ2QMGWadAITEGAZ8snIbR4YBSbpoVCfrBMAJpbEx1gkAAA+MYcAngQDnSQKSVFhcbp0AOCFQUGidAADwwBgGfPL99jLrBMAJN1yYbp0AOGEr9xkGAKcxhgGf9G4ba50AOGFbVr51AuCEiny+FgDAZYxhwCexUdxaCfj/7d17cJX1gcbx51wScnIlkAAhCGRhKCIil6AVZ2mGLN2CrlpBRLTgyFaLQZ1WXLS0zNSZVqadcRwIo6usraLCAoVFlCyMzUypMxSIVJDSQEDkknDJSQK5J+fy7h9MTs0STgFf/f3wfD9/KQnxOTOeSb55b5LUEQqbngBYIcp7AQCsRgwDLulFDAOSpNZ2AgCQpF6RiOkJAIA4iGHAJYFkn+kJgBV6JfFeACTJFwiYngAAiIMYBlzy7s7zpicAVpia3Wx6AmCF7BkzTU8AAMRBDAMuGT84yfQEwBI8ZgyQJG96pukJAIA4iGHAJcW38AgNQJIO+zymJwBW6PNf/2l6AmCFMU/92PQEoEfEMOCSd3bUmJ4AWGH5gommJwBWOPU3jgwDgM2IYcAltwxONT0BsELVyTrTEwAreOvqTU8AAMRBDAMuOVjdZnoCYIV/vyvH9ATACqf65ZqeAACIgxgGXDK4b7LpCYAVTgcbTU8ArNB5/oLpCQCAOIhhwCWHznSYngBYIfKn/zU9AbBC7syHTU8AAMRBDAMumXU7d5MGJOmfxo0zPQGwQqSDX5ICgM2IYcAltRfaTU8ArHCwmtOkAUnylv+P6QmAFcY8+bTpCUCPiGHAJf+9i2vDAEn6bm9iGJCknNmzTU8AAMRBDAMuua2AG2gBkhRu5RdDgCQ1i+8LgCSlmB4AXAYxDLhk1A3ppicAVqjpn2d6AmCF1LdWmZ4AWCHnmWdNTwB6RAwDLlmzs970BMAKG/5juOkJgBVOHC8wPQEAEAcxDLhk0vCA6QmAFQ5Xnzc9AbBC6PPPTU8AAMRBDAMuyUrl7QRIUnNbyPQEwAq9oo7pCQCAOPjpHXBJTmYv0xMAK4TCEdMTACuk+HymJwAA4iCGAZe0hwgAQJK8Xo/pCYAVnAjfFwDAZsQw4JLGFk4NBSQpyec1PQGwguNETU8AAMRBDAMu+fhYi+kJgBUe/u4o0xMAK5zq08f0BABAHMQw4JIwBwAASdLZbVtMTwCs0O/fZpqeAACIgx
gGXJKZwqmhgCT1vXmM6QmAFVJCraYnAJbINj0A6BExDLhkcE6y6QmAFU4qw/QEwArOmndNTwCsMPaZZ01PAHpEDAMuGdA7xfQEwAqDoo2mJwBWSPvRE6YnAADiIIYBlxw81Wx6AmCFfym8yfQEwApn3n7L9ATACjc9vsD0BKBHxDDgklMNYdMTACu0V3xkegJghYI5c01PAADEQQwDLrkxr5fpCYAV2r41wvQEwAqHV71iegJghbE/XmR6AtAjYhhwSXISd5MGJMlxHNMTACt4PHxfAACbEcOAS1KIYUCS1NbBJQOAJPkjEdMTAABxEMOAS3Yc4gZagCQ9/r1vmZ4AWKH6xhtNTwAAxEEMAy6ZMDRgegJghcrTPFoJkKTI3/5megJgh+l3mV4A9IgYBlzSO423EyBJre2cJg1IErdVBAC78dM74JLGVgIAkKTUFL61AJLEFcMAYDd+YgFckpnK2wmQODIMdOHIMADYjZ/eAZd0hnicDCBJSX7urA4AAOxHDAMu+dNh7iYNSNKP7r7Z9ATACscGDjQ9AQAQBzEMuOSfR6SbngBY4dPPzpmeAFjBW1NjegIAIA5iGHBJsKnT9ATACn0yecwYIElNAd4LAGAzYhhwSS+ukwQkSX2zUk1PAKzgDMo3PQEAEAcxDLgkLcVnegJghaqTdaYnAFbwVh0xPQEAEAcxDLikvjlkegJghVED0kxPAKzQ8q/fMz0BABAHMQy4JC+bJ0oCkrTvVJPpCYAVkj/canoCYIW+o3nKAOxEDAMuOd/CkWFAktIDSaYnAFYIebiXBADYjBgGXJKdTgAAktTcxi+GAEnifCEAsBsxDLikpT1iegJghUH9Mk1PAKzQ1C/X9AQAQBzEMOASr8djegJgheD5FtMTACs452pNTwAAxEEMAy4p+5QAACTpvtxK0xMAKwx4dL7pCQCAOIhhwCUj+vGcYUCSUseONz0BsMKRjetNTwCsMGr+Y6YnAD0ihgGX9M/iBlqAJAXPt5qeAFgh1NRsegIAIA5iGHDJ9FvzTU8ArNDeGTY9AbBCxg03mJ4AAIiDGAZcsv5PJ0xPAKyw5K4hpicAVoiOG2d6AgAgDmIYcElGCtcMA5J0oo33AiBJnR9wzTAgSWN/vMj0BKBHxDDgkp2ftZueAFjh4fxPTU8ArJD32I9MTwAAxEEMAy6585YM0xMAK3hGFZieAFjh2HubTU8ArDDiwYdMTwB6RAwDLolGHdMTACucqWsyPQGwgr/6lOkJAIA4iGHAJZ+d4zRpQJIemNLH9ATACudyckxPAADEQQwDLhmZn2Z6AmCFqpN1picAVkipqzc9AQAQBzEMuMTv9ZieAFgh6nDJAAAAsB8xDLiktrHD9ATACnMKOE0akKTmlGmmJwAA4vA4Dr/CBwAAAAAkFq/pAQAAAAAAfN2IYQAAAABAwiGGAQAAAAAJhxgGAAAAACQcYhgAAAAAkHCIYQAAAABAwiGGAQAAAAAJhxgGAAAAACQcYhgAAAAAkHCIYQAAAABAwiGGAQAAAAAJhxgGAAAAACQcYhgAAAAAkHCIYQAAAABAwiGGAQAAAAAJhxgGAAAAACQcYhgAAAAAkHCIYQAAAABAwiGGAQAAAAAJhxjGdc9xHNMTAAAAAFxniGFcV3oKX4/Hc9mPAQAAAEBP/KYHAFcqEonI5/MpHA4rFApp9+7dCoVCikajGj16tLKyspSWlmZ6JgAAAIDrADGM60I4HJbf71djY6NWrlyp/fv36y9/+Uvs471799att96qGTNm6Dvf+Y7BpQAAAACuBx6Hc0thua4jwg0NDVqwYIE++eST2McCgYCSk5N14cKF2J/94he/0AMPPGBgKQAAAIDrBTEMq0WjUXm9Xp0/f16PPPKIKisrNXDgQBUVFWn69OnKzs6WJL366qvauXOngsGgJGnBggV6+umnTU4HAAAAYDFOk4bVvF6v2tra9MILL6iyslI33HCDSkpKNHnyZPXp0yf2eYsWLd
LSpUu1a9cutbe3q6KiwuBqAAAAALbjbtKwVjQalSTt2rVLe/fuVVpammbPnq3i4uJuIVxbW6vy8nLt3btX7e3tKioq0urVq7t9DQAAAAD4ImIY1vJ6L/7vuWPHDp05c0Y5OTkqLi5WRkZG7HPq6+tVVlaml156SU1NTSoqKtKrr74qSers7Ix9DQAAAAD4IkoBVuvs7NRf//pXSdL06dM1dOjQ2POE6+vr9f7772vFihWXhHA4HFZycrIkqbKyUidPnjTzAgAAAABYiRiG1To6OnT27FlJUmtrqyTJ4/H8wxD2+/2KRqMKhULauHGj1qxZo9OnTxt7HQAAAADswg20YDWv16uMjAydOXNGkUhE0sUjwlu2bFFpaellQ9hxHHm9Xh0+fFhvvfWWJGnSpEnKy8sz9loAAAAA2IMYhtXS0tJ0yy23qKqqSps2bdK3v/1tBYPBuCEsXTx6XFtbqxUrVkiSJk6cqNtvv93Y6wAAAABgF2IYRnUFbGdnp6LRqFJSUmIfcxxHHo9HI0eOlHTx+uHS0lKdOHFCra2tlw1h6eIp1du3b9e+ffuUlZWlO++8Uz6fL/Y1AQAAACQ2rhmGMY7jyO/3KxgMavbs2fr9738fuy74ix588EHdfPPNCoVCOnLkiFpbWzVx4sRud432+Xyxz29vb9fOnTu1bt06BYNBjRs3TlOnTpUkQhgAAACAJGIYBnk8HrW1teknP/mJDh48qBUrVqisrKzbjbLC4bB8Pp+ef/55DR48WOFwWElJSRo0aJAqKyslScnJybHIDQaDKisr08qVK3Xo0CHl5+dryZIl6tu3r7HXCQAAAMA+HqfrOTWAAQ0NDXrjjTe0fft2HT9+XBkZGXruuec0bdo0paamxj6vo6NDZWVlWr58uWpqahQIBBQIBDR37lwNHz5cmZmZamxs1DvvvKPPP/9cp0+fVk5Ojt58800NGzbM4CsEAAAAYCNiGMbV1dVp3bp12rBhg6qrqy8bxE1NTfroo4+0fPlyHTt2LPbnqampchxHnZ2dikQi6tWrl4YNG6aXXnpJQ4cONfCKAAAAANiOGIYVrjSIw+Gwqqur9atf/UrHjh3TiRMnun2dcePG6Y477tDMmTM1YMCAr/tlAAAAALhOEMOwxpUEcdfdoCORiI4fP64DBw6oo6NDSUlJSk9P1+TJk+XxeJSUlGT41QAAAACwGTGMr9wXH2cUjUbl9V7+vm1XEsSRSKTb3aPj/fcAAAAAoCfcTRpfqXA4HLtrtCR5vV5FIpHLfn7fvn01a9YszZw5U/n5+WpqatKyZcu63WU6XghLPD4JAAAAwD9GDOMr0/Uc4bq6OhUVFenXv/61pIsxeyVB/P3vf1/9+/dXU1OTXnzxRW3durXH5xADAAAAwNUihvGV6ToivHDhQl24cEFvvPGGSktLJV1ZEN9///0aP368PB6PmpubCWIAAAAAriGG4bpoNBr750AgoLFjxyovL0+SVFpaesVB3L9/f82fP1/JycmSpJaWFi1btkzbtm1TS0vLV/gKAAAAAHzTEcNwVTgcltfrVUtLi44ePSpJWrx4sWbMmKHc3FxJVx7EjuNo5MiRGjlypDIzMzVw4EA1Nzfr+eef1x/+8Adx7zcAAAAA14oYhmsikYj8fr/q6+s1Z84cLV++XJWVlZKkhQsXavbs2VcVxB6PR36/X8nJyUpPT9fdd9+tjIwMSdJNN93EjbIAAAAAXDO/6QH4ZnAcRz6fT42NjXryySd16NAhtbS0qKKiQoMGDVJ6erpKSkokSWvXrlVtbW0shhcuXCifz9fjY5eqq6v12WefqaCgQPfff78yMzM1ZcoUDR069Ot+iQAAAAC+QTgyDFd4PB5FIhG9/vrr+vjjjzVkyBDNmzdPxcXFSk9Pjx35LSkpuewR4q4Q7rrmuK2tTR9++KHq6urUu3dv5efn65FHHiGEAQAAAHxpHBnGl/LFo7nRaFS7d+9WRkaG5s6dq3vuuUfp6emS1O
3Ib0lJiTwej9asWRM7QtzR0aHHH39cgUBAPp9PTU1N2r17tzZu3ChJmjRpkiSeIQwAAADAHcQwrlk4HJbf71dzc7Nqamrk8/m0b98+jRgxQkVFRbEQ7uL1emNB/MQTT0iS1q9fr9OnT+v111/X0aNHNXz4cI0ZM0bl5eX69NNPVVVVpREjRqi4uFgSMQwAAADAHcQwrpnf71cwGNS8efM0bNgw3XbbbZKk4uJi5efny3GcS+L1/wdxSkqKNm/erEOHDqm8vFzl5eXdbqiVm5url19+Wf379//aXx8AAACAby5iGNcsEonoxRdf1NGjR1VTU6OmpiZJUkNDgyT1GMNS9yB+9NFHNXjwYO3YsUPr1q2LhXBeXp4KCgq0dOlSrhEGAAAA4DqPw8NacRUikYh8Pl/s3/fu3auXX35Zu3fvjoXs1KlTtWLFCkmXD2JJl9w9es+ePaqtrVVdXZ0KCws1YMAAZWdnf7UvCAAAAEBCIobRI8dxYs8N7orWrmuEGxoatGfPHk2ePFkpKSnav3+/li1bpr1798b+/rJly3TvvffGvhbX+gIAAACwCY9WwiWi0ai2b9+ud999V83NzfJ6vers7JTf71d9fb3uvfdePfXUU/rjH/8oSRo9erSee+45jR8/XtLF06A3b96siooKSRdvesXvXAAAAADYhBhGN47jaNOmTfrlL3+p1157TatWrdKFCxeUnJysYDCohx56SGfPnlVOTo48Ho/C4bC8Xm8siCdMmKBoNKo9e/Zo7dq12r9/vySCGAAAAIBdiGF04/F4lJaWpnPnzikYDOr999/X2rVrdfLkSc2bN0/Hjh3TkCFD9NOf/lR33HGH/P6L92DrCuLFixersLBQ4XBY27Zt09tvv00QAwAAALAO1wyjR+vWrdPSpUslSQMHDlRra6vOnz+vgoIClZSUaMqUKUpNTb3k70WjUR04cEC/+c1vtGfPHiUlJWnatGl6+OGHNWbMGElcQwwAAADAPGIYPXIcR1u3btUzzzwT+7OsrCwtXrxY9913X9y/SxADAAAAsB2nSeMSXaE6adIkDRgwIBatPp9PR48eVUdHh6SL0duTrlOmn332WU2cOFGhUEhlZWWcMg0AAADAGsQwLuHxeBQKhTR//nydOXNGubm5kqT6+npt27ZNr7zySuwu01cbxGvWrIk9gokjwwAAAABMIYbRo6SkJD322GO68cYbtWjRIv3sZz+TJJ06dUpbtmzRqlWr1NLScsVBXFhYqFAopE2bNmnz5s2xo8sAAAAAYALXDCOu2tpapaWlKTU1tdtNtQYNGqS77rpLP/zhD5WWlqZoNCqvt+ffrTiOowMHDmjJkiU6fPiwPvjgAw0bNuzrfBkAAAAA0A0xjKuyfv16/fznP5f0j4M4FAqpoaFB2dnZSkpK0sGDB5WVlaX8/HxT8wEAAABAEjGMaxDvCHEkEpHP51MoFNKuXbu0atUqFRUVac6cOUpOTja8HAAAAAAu4pphXLVZs2bphRdekPT3a4hfe+01NTY2yufzqa2tTRUVFVq5cqX+/Oc/a/Xq1VwjDAAAAMAqHBnGNfviEeK8vDwVFhbqBz/4gT755BO99957OnDggHJycvS73/1Ow4cPN7wWAAAAAP6OGMaXsmHDhtidpiUpEAhIktra2tSvXz/99re/5WZZAAAAAKzDadL4UmbOnKnS0lJlZ2crEAiora1NgUBAEydO1OrVqwlhAAAAAFbiyDBcUVlZqSNHjqiqqkoTJkzQqFGjlJOTY3oWAAAAAPSIGMaX4jiOPB6P6RkAAAAAcFU4TRpfCiEMAAAA4HpEDAMAAAAAEg4xDAAAAABIOMQwAAAAACDhEMMAAAAAgIRDDAMAAAAAEg4xDAAAAABIOMQwAAAAACDhEMMAAAAAgIRDDAMAAAAAEg4xDAAAAABIOMQwAAAAACDhEMMAAAAAgIRDDAMAAAAAEg4xDAAAAABIOMQwAAAAACDh/B9QSwffjWqE/wAAAA
BJRU5ErkJggg==", + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [18690/18690 26:56, Epoch 10/10]\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
EpochTraining LossValidation LossAccuracyMacro F1Weighted F1
10.3885000.3855030.8781880.6738870.871348
20.3159000.3027750.9074370.7541820.903474
30.2426000.3218440.9079720.7795040.905881
40.2386000.3231190.9115390.7909220.910299
50.1601000.3282030.9156410.7934900.913836
60.1631000.3489420.9174250.8136040.916911
70.1241000.3737990.9168900.8203550.916688
80.1187000.3994740.9168900.8188390.916640
90.0668000.4143630.9176030.8307030.917226
100.0758000.4138280.9190300.8281490.918506

" + ], "text/plain": [ - "

" + "" ] }, "metadata": {}, "output_type": "display_data" - } - ], - "source": [ - "cc.plot_predictions(\n", - " predictions_file=f\"{output_dir}/{output_prefix}_pred_dict.pkl\",\n", - " id_class_dict_file=f\"{output_dir}/{output_prefix}_id_class_dict.pkl\",\n", - " title=\"disease\",\n", - " output_directory=output_dir,\n", - " output_prefix=output_prefix,\n", - " custom_class_order=[\"nf\",\"hcm\",\"dcm\"],\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "167f8023-82fa-4c05-8f0c-ea45b9c9c199", - "metadata": {}, - "outputs": [ + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", 
+ " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n", + ":54: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, { "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [468/468 00:39]\n", + "
\n", + " " + ], "text/plain": [ - "{'conf_matrix': nf hcm dcm\n", - " nf 3794 385 328\n", - " hcm 562 8680 566\n", - " dcm 13 485 2415,\n", - " 'macro_f1': 0.8426513907521005,\n", - " 'acc': 0.864232644532157,\n", - " 'all_roc_metrics': None}" + "" ] }, - "execution_count": 5, "metadata": {}, - "output_type": "execute_result" + "output_type": "display_data" } ], "source": [ - "all_metrics_test" + "for organ in organ_list:\n", + " print(organ)\n", + " organ_trainset = trainset_dict[organ]\n", + " organ_evalset = evalset_dict[organ]\n", + " organ_label_dict = traintargetdict_dict[organ]\n", + " \n", + " # set logging steps\n", + " logging_steps = round(len(organ_trainset)/geneformer_batch_size/10)\n", + " \n", + " # reload pretrained model\n", + " model = BertForSequenceClassification.from_pretrained(\"/path/to/pretrained_model/\", \n", + " num_labels=len(organ_label_dict.keys()),\n", + " output_attentions = False,\n", + " output_hidden_states = False).to(\"cuda\")\n", + " \n", + " # define output directory path\n", + " current_date = datetime.datetime.now()\n", + " datestamp = f\"{str(current_date.year)[-2:]}{current_date.month:02d}{current_date.day:02d}\"\n", + " output_dir = f\"/path/to/models/{datestamp}_geneformer_CellClassifier_{organ}_L{max_input_size}_B{geneformer_batch_size}_LR{max_lr}_LS{lr_schedule_fn}_WU{warmup_steps}_E{epochs}_O{optimizer}_F{freeze_layers}/\"\n", + " \n", + " # ensure not overwriting previously saved model\n", + " saved_model_test = os.path.join(output_dir, f\"pytorch_model.bin\")\n", + " if os.path.isfile(saved_model_test) == True:\n", + " raise Exception(\"Model already saved to this directory.\")\n", + "\n", + " # make output directory\n", + " subprocess.call(f'mkdir {output_dir}', shell=True)\n", + " \n", + " # set training arguments\n", + " training_args = {\n", + " \"learning_rate\": max_lr,\n", + " \"do_train\": True,\n", + " \"do_eval\": True,\n", + " \"evaluation_strategy\": \"epoch\",\n", + " \"save_strategy\": \"epoch\",\n", + 
" \"logging_steps\": logging_steps,\n", + " \"group_by_length\": True,\n", + " \"length_column_name\": \"length\",\n", + " \"disable_tqdm\": False,\n", + " \"lr_scheduler_type\": lr_schedule_fn,\n", + " \"warmup_steps\": warmup_steps,\n", + " \"weight_decay\": 0.001,\n", + " \"per_device_train_batch_size\": geneformer_batch_size,\n", + " \"per_device_eval_batch_size\": geneformer_batch_size,\n", + " \"num_train_epochs\": epochs,\n", + " \"load_best_model_at_end\": True,\n", + " \"output_dir\": output_dir,\n", + " }\n", + " \n", + " training_args_init = TrainingArguments(**training_args)\n", + "\n", + " # create the trainer\n", + " trainer = Trainer(\n", + " model=model,\n", + " args=training_args_init,\n", + " data_collator=DataCollatorForCellClassification(),\n", + " train_dataset=organ_trainset,\n", + " eval_dataset=organ_evalset,\n", + " compute_metrics=compute_metrics\n", + " )\n", + " # train the cell type classifier\n", + " trainer.train()\n", + " predictions = trainer.predict(organ_evalset)\n", + " with open(f\"{output_dir}predictions.pickle\", \"wb\") as fp:\n", + " pickle.dump(predictions, fp)\n", + " trainer.save_metrics(\"eval\",predictions.metrics)\n", + " trainer.save_model(output_dir)" ] } ], @@ -450,7 +1939,12 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.15" + "version": "3.10.11" + }, + "vscode": { + "interpreter": { + "hash": "eba1599a1f7e611c14c87ccff6793920aa63510b01fc0e229d6dd014149b8829" + } } }, "nbformat": 4, diff --git a/examples/extract_and_plot_cell_embeddings.ipynb b/examples/extract_and_plot_cell_embeddings.ipynb index f00388708664a1cd0c774bfa13f0c01d0ee6578d..a0a3de41c1a7f42bde244a1c051b6d1f714c7bbf 100644 --- a/examples/extract_and_plot_cell_embeddings.ipynb +++ b/examples/extract_and_plot_cell_embeddings.ipynb @@ -18,8 +18,6 @@ "outputs": [], "source": [ "# initiate EmbExtractor\n", - "# OF NOTE: token_dictionary_file must be set to the gc-30M token dictionary if using a 30M 
series model\n", - "# (otherwise the EmbExtractor will use the current default model dictionary)\n", "embex = EmbExtractor(model_type=\"CellClassifier\",\n", " num_classes=3,\n", " filter_data={\"cell_type\":[\"Cardiomyocyte1\",\"Cardiomyocyte2\",\"Cardiomyocyte3\"]},\n", @@ -28,13 +26,11 @@ " emb_label=[\"disease\",\"cell_type\"],\n", " labels_to_plot=[\"disease\"],\n", " forward_batch_size=200,\n", - " nproc=16,\n", - " token_dictionary_file=\"./gene_dictionaries_30m/token_dictionary_gc30M.pkl\") # change from current default dictionary for 30M model series\n", + " nproc=16)\n", "\n", "# extracts embedding from input data\n", - "# input data is tokenized rank value encodings generated by Geneformer tokenizer (see tokenizing_scRNAseq_data.ipynb)\n", - "# example dataset for 30M model series: https://huggingface.co/datasets/ctheodoris/Genecorpus-30M/tree/main/example_input_files/cell_classification/disease_classification/human_dcm_hcm_nf.dataset\n", - "embs = embex.extract_embs(\"../fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224\", # example 30M fine-tuned model\n", + "# example dataset: https://huggingface.co/datasets/ctheodoris/Genecorpus-30M/tree/main/example_input_files/cell_classification/disease_classification/human_dcm_hcm_nf.dataset\n", + "embs = embex.extract_embs(\"../fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224\",\n", " \"path/to/input_data/\",\n", " \"path/to/output_directory/\",\n", " \"output_prefix\")\n" @@ -132,7 +128,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.15" + "version": "3.10.11" } }, "nbformat": 4, diff --git a/examples/gene_classification.ipynb b/examples/gene_classification.ipynb index 284da7a1cc5846566d8b599ac2b549f6dc20f4a4..a73fa2f8b55281c1d330862de4983966a46c33a1 100644 --- a/examples/gene_classification.ipynb +++ b/examples/gene_classification.ipynb @@ -2,207 +2,593 @@ "cells": [ { "cell_type": "markdown", - "id": 
"08f41458-5304-48c5-9e92-f9b56ab052c4", "metadata": {}, "source": [ "## Geneformer Fine-Tuning for Classification of Dosage-Sensitive vs. -Insensitive Transcription Factors (TFs)" ] }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "GPU_NUMBER = [0]\n", + "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join([str(s) for s in GPU_NUMBER])\n", + "os.environ[\"NCCL_DEBUG\"] = \"INFO\"" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# imports\n", + "import datetime\n", + "import subprocess\n", + "import math\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import pandas as pd\n", + "from datasets import load_from_disk\n", + "from sklearn import preprocessing\n", + "from sklearn.metrics import accuracy_score, auc, confusion_matrix, ConfusionMatrixDisplay, roc_curve\n", + "from sklearn.model_selection import StratifiedKFold\n", + "import torch\n", + "from transformers import BertForTokenClassification\n", + "from transformers import Trainer\n", + "from transformers.training_args import TrainingArguments\n", + "from tqdm.notebook import tqdm\n", + "\n", + "from geneformer import DataCollatorForGeneClassification\n", + "from geneformer.pretrainer import token_dictionary" + ] + }, { "cell_type": "markdown", - "id": "79539e95-2c9c-4162-835c-f0d158abb15d", "metadata": {}, "source": [ - "### Please note that, as usual with deep learning models, we **highly** recommend tuning learning hyperparameters for all fine-tuning applications as this can significantly improve model performance. Example below uses default hyperparameters, but please see the \"hyperparam_optimiz_for_disease_classifier\" script for an example of how to tune hyperparameters for downstream applications." 
+ "## Load Gene Attribute Information" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "# table of corresponding Ensembl IDs, gene names, and gene types (e.g. coding, miRNA, etc.)\n", + "gene_info = pd.read_csv(\"/path/to/gene_info_table.csv\", index_col=0)\n", + "\n", + "# create dictionaries for corresponding attributes\n", + "gene_id_type_dict = dict(zip(gene_info[\"ensembl_id\"],gene_info[\"gene_type\"]))\n", + "gene_name_id_dict = dict(zip(gene_info[\"gene_name\"],gene_info[\"ensembl_id\"]))\n", + "gene_id_name_dict = {v: k for k,v in gene_name_id_dict.items()}" ] }, { "cell_type": "markdown", - "id": "51b4852a-9f03-4bc3-ba33-79eaa4582d50", "metadata": {}, "source": [ - "### Train gene classifier with 5-fold cross-validation:" + "## Load Training Data and Class Labels" ] }, { "cell_type": "code", - "execution_count": 1, - "id": "58d59e09-5e6c-4fba-ba2b-3aee103869fd", + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ - "import datetime\n", - "import pickle\n", - "from geneformer import Classifier\n", + "# function for preparing targets and labels\n", + "def prep_inputs(genegroup1, genegroup2, id_type):\n", + " if id_type == \"gene_name\":\n", + " targets1 = [gene_name_id_dict[gene] for gene in genegroup1 if gene_name_id_dict.get(gene) in token_dictionary]\n", + " targets2 = [gene_name_id_dict[gene] for gene in genegroup2 if gene_name_id_dict.get(gene) in token_dictionary]\n", + " elif id_type == \"ensembl_id\":\n", + " targets1 = [gene for gene in genegroup1 if gene in token_dictionary]\n", + " targets2 = [gene for gene in genegroup2 if gene in token_dictionary]\n", + " \n", + " targets1_id = [token_dictionary[gene] for gene in targets1]\n", + " targets2_id = [token_dictionary[gene] for gene in targets2]\n", + " \n", + " targets = np.array(targets1_id + targets2_id)\n", + " labels = np.array([0]*len(targets1_id) + [1]*len(targets2_id))\n", + " nsplits = min(5, min(len(targets1_id), 
len(targets2_id))-1)\n", + " assert nsplits > 2\n", + " print(f\"# targets1: {len(targets1_id)}\\n# targets2: {len(targets2_id)}\\n# splits: {nsplits}\")\n", + " return targets, labels, nsplits" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# preparing targets and labels for dosage sensitive vs insensitive TFs\n", + "dosage_tfs = pd.read_csv(\"/path/to/dosage_sens_tf_labels.csv\", header=0)\n", + "sensitive = dosage_tfs[\"dosage_sensitive\"].dropna()\n", + "insensitive = dosage_tfs[\"dosage_insensitive\"].dropna()\n", + "targets, labels, nsplits = prep_inputs(sensitive, insensitive, \"ensembl_id\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "# load training dataset\n", + "train_dataset=load_from_disk(\"/path/to/gene_train_data.dataset\")\n", + "shuffled_train_dataset = train_dataset.shuffle(seed=42)\n", + "subsampled_train_dataset = shuffled_train_dataset.select([i for i in range(50_000)])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define Functions for Training and Cross-Validating Classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "def preprocess_classifier_batch(cell_batch, max_len):\n", + " if max_len == None:\n", + " max_len = max([len(i) for i in cell_batch[\"input_ids\"]])\n", + " def pad_label_example(example):\n", + " example[\"labels\"] = np.pad(example[\"labels\"], \n", + " (0, max_len-len(example[\"input_ids\"])), \n", + " mode='constant', constant_values=-100)\n", + " example[\"input_ids\"] = np.pad(example[\"input_ids\"], \n", + " (0, max_len-len(example[\"input_ids\"])), \n", + " mode='constant', constant_values=token_dictionary.get(\"\"))\n", + " example[\"attention_mask\"] = (example[\"input_ids\"] != token_dictionary.get(\"\")).astype(int)\n", + " return example\n", + " padded_batch = 
cell_batch.map(pad_label_example)\n", + " return padded_batch\n", "\n", - "current_date = datetime.datetime.now()\n", - "datestamp = f\"{str(current_date.year)[-2:]}{current_date.month:02d}{current_date.day:02d}{current_date.hour:02d}{current_date.minute:02d}{current_date.second:02d}\"\n", - "datestamp_min = f\"{str(current_date.year)[-2:]}{current_date.month:02d}{current_date.day:02d}\"\n", + "# forward batch size is batch size for model inference (e.g. 200)\n", + "def classifier_predict(model, evalset, forward_batch_size, mean_fpr):\n", + " predict_logits = []\n", + " predict_labels = []\n", + " model.eval()\n", + " \n", + " # ensure there is at least 2 examples in each batch to avoid incorrect tensor dims\n", + " evalset_len = len(evalset)\n", + " max_divisible = find_largest_div(evalset_len, forward_batch_size)\n", + " if len(evalset) - max_divisible == 1:\n", + " evalset_len = max_divisible\n", + " \n", + " max_evalset_len = max(evalset.select([i for i in range(evalset_len)])[\"length\"])\n", + " \n", + " for i in range(0, evalset_len, forward_batch_size):\n", + " max_range = min(i+forward_batch_size, evalset_len)\n", + " batch_evalset = evalset.select([i for i in range(i, max_range)])\n", + " padded_batch = preprocess_classifier_batch(batch_evalset, max_evalset_len)\n", + " padded_batch.set_format(type=\"torch\")\n", + " \n", + " input_data_batch = padded_batch[\"input_ids\"]\n", + " attn_msk_batch = padded_batch[\"attention_mask\"]\n", + " label_batch = padded_batch[\"labels\"]\n", + " with torch.no_grad():\n", + " outputs = model(\n", + " input_ids = input_data_batch.to(\"cuda\"), \n", + " attention_mask = attn_msk_batch.to(\"cuda\"), \n", + " labels = label_batch.to(\"cuda\"), \n", + " )\n", + " predict_logits += [torch.squeeze(outputs.logits.to(\"cpu\"))]\n", + " predict_labels += [torch.squeeze(label_batch.to(\"cpu\"))]\n", + " \n", + " logits_by_cell = torch.cat(predict_logits)\n", + " all_logits = logits_by_cell.reshape(-1, logits_by_cell.shape[2])\n", 
+ " labels_by_cell = torch.cat(predict_labels)\n", + " all_labels = torch.flatten(labels_by_cell)\n", + " logit_label_paired = [item for item in list(zip(all_logits.tolist(), all_labels.tolist())) if item[1]!=-100]\n", + " y_pred = [vote(item[0]) for item in logit_label_paired]\n", + " y_true = [item[1] for item in logit_label_paired]\n", + " logits_list = [item[0] for item in logit_label_paired]\n", + " # probability of class 1\n", + " y_score = [py_softmax(item)[1] for item in logits_list]\n", + " conf_mat = confusion_matrix(y_true, y_pred)\n", + " fpr, tpr, _ = roc_curve(y_true, y_score)\n", + " # plot roc_curve for this split\n", + " plt.plot(fpr, tpr)\n", + " plt.xlim([0.0, 1.0])\n", + " plt.ylim([0.0, 1.05])\n", + " plt.xlabel('False Positive Rate')\n", + " plt.ylabel('True Positive Rate')\n", + " plt.title('ROC')\n", + " plt.show()\n", + " # interpolate to graph\n", + " interp_tpr = np.interp(mean_fpr, fpr, tpr)\n", + " interp_tpr[0] = 0.0\n", + " return fpr, tpr, interp_tpr, conf_mat \n", "\n", - "output_prefix = \"tf_dosage_sens_test\"\n", - "output_dir = f\"/path/to/output_dir/{datestamp}\"\n", - "!mkdir $output_dir" + "def vote(logit_pair):\n", + " a, b = logit_pair\n", + " if a > b:\n", + " return 0\n", + " elif b > a:\n", + " return 1\n", + " elif a == b:\n", + " return \"tie\"\n", + " \n", + "def py_softmax(vector):\n", + "\te = np.exp(vector)\n", + "\treturn e / e.sum()\n", + " \n", + "# get cross-validated mean and sd metrics\n", + "def get_cross_valid_metrics(all_tpr, all_roc_auc, all_tpr_wt):\n", + " wts = [count/sum(all_tpr_wt) for count in all_tpr_wt]\n", + " print(wts)\n", + " all_weighted_tpr = [a*b for a,b in zip(all_tpr, wts)]\n", + " mean_tpr = np.sum(all_weighted_tpr, axis=0)\n", + " mean_tpr[-1] = 1.0\n", + " all_weighted_roc_auc = [a*b for a,b in zip(all_roc_auc, wts)]\n", + " roc_auc = np.sum(all_weighted_roc_auc)\n", + " roc_auc_sd = math.sqrt(np.average((all_roc_auc-roc_auc)**2, weights=wts))\n", + " return mean_tpr, roc_auc, 
roc_auc_sd\n", + "\n", + "# Function to find the largest number smaller\n", + "# than or equal to N that is divisible by k\n", + "def find_largest_div(N, K):\n", + " rem = N % K\n", + " if(rem == 0):\n", + " return N\n", + " else:\n", + " return N - rem" ] }, { "cell_type": "code", - "execution_count": 2, - "id": "9e33942f-39e4-4db4-a3de-5949bed9fa5d", + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ - "# Example input_data_file: https://huggingface.co/datasets/ctheodoris/Genecorpus-30M/blob/main/example_input_files/gene_classification/dosage_sensitive_tfs/dosage_sensitivity_TFs.pickle\n", - "with open(\"/path/to/dosage_sensitivity_TFs.pickle\", \"rb\") as fp:\n", - " gene_class_dict = pickle.load(fp)" + "# cross-validate gene classifier\n", + "def cross_validate(data, targets, labels, nsplits, subsample_size, training_args, freeze_layers, output_dir, num_proc):\n", + " # check if output directory already written to\n", + " # ensure not overwriting previously saved model\n", + " model_dir_test = os.path.join(output_dir, \"ksplit0/models/pytorch_model.bin\")\n", + " if os.path.isfile(model_dir_test) == True:\n", + " raise Exception(\"Model already saved to this directory.\")\n", + " \n", + " # initiate eval metrics to return\n", + " num_classes = len(set(labels))\n", + " mean_fpr = np.linspace(0, 1, 100)\n", + " all_tpr = []\n", + " all_roc_auc = []\n", + " all_tpr_wt = []\n", + " label_dicts = []\n", + " confusion = np.zeros((num_classes,num_classes))\n", + " \n", + " # set up cross-validation splits\n", + " skf = StratifiedKFold(n_splits=nsplits, random_state=0, shuffle=True)\n", + " # train and evaluate\n", + " iteration_num = 0\n", + " for train_index, eval_index in tqdm(skf.split(targets, labels)):\n", + " if len(labels) > 500:\n", + " print(\"early stopping activated due to large # of training examples\")\n", + " nsplits = 3\n", + " if iteration_num == 3:\n", + " break\n", + " print(f\"****** Crossval split: {iteration_num}/{nsplits-1} 
******\\n\")\n", + " # generate cross-validation splits\n", + " targets_train, targets_eval = targets[train_index], targets[eval_index]\n", + " labels_train, labels_eval = labels[train_index], labels[eval_index]\n", + " label_dict_train = dict(zip(targets_train, labels_train))\n", + " label_dict_eval = dict(zip(targets_eval, labels_eval))\n", + " label_dicts += (iteration_num, targets_train, targets_eval, labels_train, labels_eval)\n", + " \n", + " # function to filter by whether contains train or eval labels\n", + " def if_contains_train_label(example):\n", + " a = label_dict_train.keys()\n", + " b = example['input_ids']\n", + " return not set(a).isdisjoint(b)\n", + "\n", + " def if_contains_eval_label(example):\n", + " a = label_dict_eval.keys()\n", + " b = example['input_ids']\n", + " return not set(a).isdisjoint(b)\n", + " \n", + " # filter dataset for examples containing classes for this split\n", + " print(f\"Filtering training data\")\n", + " trainset = data.filter(if_contains_train_label, num_proc=num_proc)\n", + " print(f\"Filtered {round((1-len(trainset)/len(data))*100)}%; {len(trainset)} remain\\n\")\n", + " print(f\"Filtering evalation data\")\n", + " evalset = data.filter(if_contains_eval_label, num_proc=num_proc)\n", + " print(f\"Filtered {round((1-len(evalset)/len(data))*100)}%; {len(evalset)} remain\\n\")\n", + "\n", + " # minimize to smaller training sample\n", + " training_size = min(subsample_size, len(trainset))\n", + " trainset_min = trainset.select([i for i in range(training_size)])\n", + " eval_size = min(training_size, len(evalset))\n", + " half_training_size = round(eval_size/2)\n", + " evalset_train_min = evalset.select([i for i in range(half_training_size)])\n", + " evalset_oos_min = evalset.select([i for i in range(half_training_size, eval_size)])\n", + " \n", + " # label conversion functions\n", + " def generate_train_labels(example):\n", + " example[\"labels\"] = [label_dict_train.get(token_id, -100) for token_id in 
example[\"input_ids\"]]\n", + " return example\n", + "\n", + " def generate_eval_labels(example):\n", + " example[\"labels\"] = [label_dict_eval.get(token_id, -100) for token_id in example[\"input_ids\"]]\n", + " return example\n", + " \n", + " # label datasets \n", + " print(f\"Labeling training data\")\n", + " trainset_labeled = trainset_min.map(generate_train_labels)\n", + " print(f\"Labeling evaluation data\")\n", + " evalset_train_labeled = evalset_train_min.map(generate_eval_labels)\n", + " print(f\"Labeling evaluation OOS data\")\n", + " evalset_oos_labeled = evalset_oos_min.map(generate_eval_labels)\n", + " \n", + " # create output directories\n", + " ksplit_output_dir = os.path.join(output_dir, f\"ksplit{iteration_num}\")\n", + " ksplit_model_dir = os.path.join(ksplit_output_dir, \"models/\") \n", + " \n", + " # ensure not overwriting previously saved model\n", + " model_output_file = os.path.join(ksplit_model_dir, \"pytorch_model.bin\")\n", + " if os.path.isfile(model_output_file) == True:\n", + " raise Exception(\"Model already saved to this directory.\")\n", + "\n", + " # make training and model output directories\n", + " subprocess.call(f'mkdir {ksplit_output_dir}', shell=True)\n", + " subprocess.call(f'mkdir {ksplit_model_dir}', shell=True)\n", + " \n", + " # load model\n", + " model = BertForTokenClassification.from_pretrained(\n", + " \"/gladstone/theodoris/lab/ctheodoris/archive/geneformer_files/geneformer/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/\",\n", + " num_labels=2,\n", + " output_attentions = False,\n", + " output_hidden_states = False\n", + " )\n", + " if freeze_layers is not None:\n", + " modules_to_freeze = model.bert.encoder.layer[:freeze_layers]\n", + " for module in modules_to_freeze:\n", + " for param in module.parameters():\n", + " param.requires_grad = False\n", + " \n", + " model = model.to(\"cuda:0\")\n", + " \n", + " # add output directory to training args and initiate\n", + 
" training_args[\"output_dir\"] = ksplit_output_dir\n", + " training_args_init = TrainingArguments(**training_args)\n", + " \n", + " # create the trainer\n", + " trainer = Trainer(\n", + " model=model,\n", + " args=training_args_init,\n", + " data_collator=DataCollatorForGeneClassification(),\n", + " train_dataset=trainset_labeled,\n", + " eval_dataset=evalset_train_labeled\n", + " )\n", + "\n", + " # train the gene classifier\n", + " trainer.train()\n", + " \n", + " # save model\n", + " trainer.save_model(ksplit_model_dir)\n", + " \n", + " # evaluate model\n", + " fpr, tpr, interp_tpr, conf_mat = classifier_predict(trainer.model, evalset_oos_labeled, 200, mean_fpr)\n", + " \n", + " # append to tpr and roc lists\n", + " confusion = confusion + conf_mat\n", + " all_tpr.append(interp_tpr)\n", + " all_roc_auc.append(auc(fpr, tpr))\n", + " # append number of eval examples by which to weight tpr in averaged graphs\n", + " all_tpr_wt.append(len(tpr))\n", + " \n", + " iteration_num = iteration_num + 1\n", + " \n", + " # get overall metrics for cross-validation\n", + " mean_tpr, roc_auc, roc_auc_sd = get_cross_valid_metrics(all_tpr, all_roc_auc, all_tpr_wt)\n", + " return all_roc_auc, roc_auc, roc_auc_sd, mean_fpr, mean_tpr, confusion, label_dicts" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define Functions for Plotting Results" ] }, { "cell_type": "code", - "execution_count": 3, - "id": "f4053ee9-3506-4c97-b544-8d667f0adfab", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "# plot ROC curve\n", + "def plot_ROC(bundled_data, title):\n", + " plt.figure()\n", + " lw = 2\n", + " for roc_auc, roc_auc_sd, mean_fpr, mean_tpr, sample, color in bundled_data:\n", + " plt.plot(mean_fpr, mean_tpr, color=color,\n", + " lw=lw, label=\"{0} (AUC {1:0.2f} $\\pm$ {2:0.2f})\".format(sample, roc_auc, roc_auc_sd))\n", + " plt.plot([0, 1], [0, 1], color='black', lw=lw, linestyle='--')\n", + " plt.xlim([0.0, 1.0])\n", + " 
plt.ylim([0.0, 1.05])\n", + " plt.xlabel('False Positive Rate')\n", + " plt.ylabel('True Positive Rate')\n", + " plt.title(title)\n", + " plt.legend(loc=\"lower right\")\n", + " plt.show()\n", + " \n", + "# plot confusion matrix\n", + "def plot_confusion_matrix(classes_list, conf_mat, title):\n", + " display_labels = []\n", + " i = 0\n", + " for label in classes_list:\n", + " display_labels += [\"{0}\\nn={1:.0f}\".format(label, sum(conf_mat[:,i]))]\n", + " i = i + 1\n", + " display = ConfusionMatrixDisplay(confusion_matrix=preprocessing.normalize(conf_mat, norm=\"l1\"), \n", + " display_labels=display_labels)\n", + " display.plot(cmap=\"Blues\",values_format=\".2g\")\n", + " plt.title(title)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Fine-Tune With Gene Classification Learning Objective and Quantify Predictive Performance" + ] + }, + { + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Hyperparameter tuning is highly recommended for optimal results. No training_args provided; using default hyperparameters.\n" - ] - } - ], "source": [ - "# OF NOTE: token_dictionary_file must be set to the gc-30M token dictionary if using a 30M series model\n", - "# (otherwise the Classifier will use the current default model dictionary)\n", - "# 30M token dictionary: https://huggingface.co/ctheodoris/Geneformer/blob/main/geneformer/gene_dictionaries_30m/token_dictionary_gc30M.pkl\n", - "cc = Classifier(classifier=\"gene\",\n", - " gene_class_dict = gene_class_dict,\n", - " max_ncells = 10_000,\n", - " freeze_layers = 4,\n", - " num_crossval_splits = 5,\n", - " forward_batch_size=200,\n", - " nproc=16)" + "### Please note that, as usual with deep learning models, we **highly** recommend tuning learning hyperparameters for all fine-tuning applications as this can significantly improve model performance. 
Example hyperparameters are defined below, but please see the \"hyperparam_optimiz_for_disease_classifier\" script for an example of how to tune hyperparameters for downstream applications." ] }, { "cell_type": "code", - "execution_count": 4, - "id": "e4855e53-1cd7-4af0-b786-02b6c0e55f8c", + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "6a3f7bcf2a314368b00f49c74a775571", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Saving the dataset (0/1 shards): 0%| | 0/33558 [00:00:45: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" ] }, @@ -213,55 +599,47 @@ "
\n", " \n", " \n", - " [834/834 02:37, Epoch 1/1]\n", + " [834/834 01:33, Epoch 1/1]\n", "
\n", " \n", " \n", - " \n", + " \n", " \n", " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", "
StepTraining Loss
830.729100
1660.667600
2490.5531001000.684000
3320.4091002000.617600
4150.2943003000.477400
4980.1970004000.334300
5810.1383005000.229500
6640.0999006000.152700
7470.0837007000.125600
8300.0723008000.104900

" @@ -274,77 +652,108 @@ "output_type": "display_data" }, { - "name": "stdout", + "name": "stderr", "output_type": "stream", "text": [ - "****** Validation split: 2/5 ******\n", - "\n" + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-4d8947ed4c65f4a4.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-8a83f628e23d5548.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-c6c437341faa1cfe.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-2010c177e27e09d1.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-15543d980ad3cbb0.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-a81a942ab15e4aa3.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-5d2c963673bb1115.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-6c7cc476a9d722c3.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-e274abd189113bba.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-1aedba9e0b982e5c.arrow\n", + "Loading cached processed dataset at 
/n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-6668161997480231.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-d802b8093fb9c6f7.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-3ea48baa5fe880e2.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-86024b6184e99afe.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-7a47db2c9f9758a4.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-af1f6b8f743677db.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-67cffffa35fa22f7.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-81ed63bd02a44ee5.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-6e5a21d4d57e333d.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-eecde81c07e6d036.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-fcc19fab82bb7115.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-ea856d7fa4e78b24.arrow\n", + "Loading cached processed 
dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-698344adb3749f61.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-ee3f9e89abdbee4c.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-d98fd9d7fda61d3b.arrow\n" ] }, { "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "d186836393d84c19b9c0dffafb31a09c", - "version_major": 2, - "version_minor": 0 - }, + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYoAAAEWCAYAAAB42tAoAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAAugElEQVR4nO3deVyU9d7/8dewCriAyqK4b4keS03NTqKJkiYiihp6yjzn1ruy9CzlLzXLtM2jZZ06WWqW3Voa921kmUumFWpuxy1cK0UEFyYXkE224fr9QVGkMIDODAPv5+PhQ2au71zXZ74Pvd5c2/drMgzDQEREpAwuji5ARESqNwWFiIiUS0EhIiLlUlCIiEi5FBQiIlIuBYWIiJRLQSEiIuVyc3QBItVZWFgYFy9exNXVFW9vb0JDQ3nmmWfw8fEBYP/+/fzrX//i0KFDuLi40LNnT6ZOnUq7du1K1pGVlcXrr7/Ol19+yZUrV2jcuDF33303kyZNomHDho76aiIVpiMKESsWLVrEgQMHWLNmDUePHmXJkiUAHDhwgAkTJjBgwAC2bdvGli1buOWWWxg7diwpKSkA5OfnM378eE6cOMHSpUvZt28fH330Eb6+vhw6dMiRX0ukwnREIVJB/v7+9OnTh2PHjgHw8ssvExUVxfjx40va/OMf/+DIkSP8+9//Zv78+Xz66aecP3+e5cuXlxyFNGrUiMcee8wh30GkKnREIVJBqampbNu2jRYtWnD16lUOHDjA4MGDr2l37733smPHDgB27NhBaGhoSUiIOCMdUYhY8ctv/zk5OfTu3Zu//vWvXLlyhaKiIvz9/a9p7+/vT1paGgDp6el07tzZrvWK3Gw6ohCxYuHChRw4cIAVK1aQmJhIWloa9evXx8XFhQsXLlzT/sKFC/j5+QHg6+t73TYizkRBIVJBvXr1Ijo6mnnz5uHt7U3Xrl3ZuHHjNe02bNhA7969AfjjH//I9u3bycnJsXe5IjeNgkKkEsaPH8+OHTs4duwYTzzxBGvWrGH58uVkZWVx5coVXnvtNQ4ePMjkyZMBiIqKIigoiClTpnDy5EmKiopIS0tj0aJFxMfHO/jbiFSMgkKkEho2bEhUVBRvvfUWPXr0YOnSpXz55ZeEhobSv39/jh07xsqVK2nVqhUAHh4evP/++7Rp04b/+q//4vbbb2f06NGkpaVx6623OvbLiFSQSRMXiYhIeXREISIi5VJQiIhIuRQUIiJSLgWFiIiUy+mezL7jjjsIDg52dBkiIk7l7Nmz7N69u0qfdbqgCA4OJi4uztFliIg4lejo6Cp/VqeeRESkXAoKEREpl4JCRETKpaAQEZFyKShER
KRcCgoRESmXzYJixowZ3HnnnQwdOvS6yw3D4IUXXiA8PJzIyEiOHDliq1JEROQG2Ow5iujoaB544AGmTZt23eVbt24lKSmJTZs28d133zF79mz+7//+z1bliIidVWZg6sqMYV2Z4a4rVUOl1luJtpVYc3Udy9tmQdGzZ0/OnDlT5vItW7YwfPhwTCYTXbt2JSMjg59++omAgABblSRSJZYig7xCC/mFReQWFHEpO4/sPAvnr1wF4PSlHDzdXCgsMii0GBQWFVFYZJByOYd6ddzILzTILbSQcbWA/MKin9sV/52ZW0haTj4NvNyB0jsKwyjexRhG8c6m+O/ftvnte79pa/xm13S95b/5XH5hkc37T6qH7jfwWYc9mW02mwkKCip5HRQUhNlsvm5QxMbGEhsbC1Ayab3IjbqSU8CRc1dIyykg+XIOBgZ7k9K4mm8hNSOXzNwCLmblV3q9JhO4u7iAqXhHHOzrhae7C3XcXKlbx4067i64ebrh7mqimZ+J7DwLXh6u1PN0AxOYMJWsx1Tyt6n4bxPw88/FP/1uOWAy/fr5n1uXXpfp5y38/LkLmXkE+3lV/PtVsi8qvt6KN67ceivRtjLrrUxjW9VQiW/35Q9VKOZnDguK6x0SltXxMTExxMTEADf2GLrUPldyCki8mMWPP2WxK/ESR89lcCbtKrkFFgqLrv036OvtjqebC12CG+Bfrw51PV3JyrPQzM8LL3dXPNxcqFfHjUY+nni6u+Dr5U59L3e8PYqXubm44Opimx2IyI348o2qf9ZhQREUFERqamrJ69TUVJ12kiozZ+RyITOPfafT2J+cxtm0q+w9fe3Rp5uLiXYBdWnd2IfOTevTMag+zRt64+3hSkMfD3w8nW74MxGbc9j/irCwMD744AMiIiL47rvvqFevnoJCKqSoyOC7M+lsPmbmyLkMvvn+wjVt/Ot5MrxrU3y9Peja3JcOgfVoF1AXDzfdES5SWTYLiscff5w9e/aQlpZG3759mTJlCoWFhQCMHTuWfv36ER8fT3h4OF5eXrz00ku2KkWcnGEYHDufydJtiWz98UKp6wYeri7cfYs/QfXrENren2Z+XnRqWh93VwWCyM1is6B49dVXy11uMpl49tlnbbV5cWJn0nL44oiZQ2fS+cGcxdHzGSXLWjT0ZmCIH73bNGRIlyY09a34RVgRqRqdkBWHK7AUcfRcBjtOXuLd7Ymljhj863nyQO8WNK7rydBbm9AuoJ4DKxWpnRQU4hAJZ9JZd+g8R85msPf0ZXILiu/n9/FwZXjXpgz+QxD9Owbg6ebq4EpFREEhdnPsfAYf7zvDyj3J5ORbAGgXUJe7OwTQo5Uff2zbmFuC6un2UpFqRkEhNpVXaOHtb06y5sBZki7lANC4rgd/bNuIpyM60aqxj4MrFBFrFBRy0xUVGWw7cZHNR818evAsGbnFd7uN692ScXe2pEOgrjOIOBMFhdywrLxCvjicSkpaDrsTL7Mz8VLJsr4d/Bl1ezPu6RRIHXddbxBxRgoKqbL0nHze2ZbIwq9PlrwX7OvFfT2a0aqxD+N6t6ReHXcHVigiN4OCQipt3+k0/t//fUfixWwAGvp48PeB7RnSpQmN63o6uDoRudkUFFJhKZdzmPg/e/nenAlA47qezI3uwoCOAbjoTiWRGktBIeXKLyxi+c4kFsUncjErD1cXE6HtG/Pi8C60aOTt6PJExA4UFHJdh89eYdWeZD777hyZuYV4uLowqHMgU++5hfa6a0mkVlFQSCmHzlzhT0t3kfnzLa1B9evwwvA/cE+nILw8dNeSSG2koBCg+BbXueuP8eHuZADa+vuweNzttPWva7OZvETEOSgohJTLOQxYEE++pYgR3YKZMaQjAfXqOLosEakmFBS1mGEYvPdtEs9/fhSAh/q24akhIQ6uSkSqGwVFLXUwJZ1Znx4m4cwVvNxdeefBHvRp39jRZYlINaSgqGXif7jAtNUJpGbk4mKC8Xe25MnBH
TVXtIiUSXuHWmTZt6eYs/Yobi4m7r+jBY/1b6cZ4kTEKgVFLfDFkVQe+3A/hUUGnZvWZ+XE3jTw1hhMIlIxCooarNBSxCubfmBRfPGgfQH1PPnood4aqE9EKkVBUUN9ddzMox/uJ7egiMZ1Pfl8Sh+CGuiWVxGpPAVFDXMwJZ3XvvyB+B8uAPBsZCfG39lKg/aJSJUpKGqIoiKDlzd9z9vfFJ9murNNIxbcd5suVovIDVNQODnDMPjmhws8v/YoiRez6RhUjzf/1I12ARq4T0RuDgWFE7uYlcd9i3aWTCA0b2QX7uvRXGMzichNpaBwUoviT/LPDccBGNw5iFfuu426emhORGxAexYnk19YxJOrv2PNwXN0a+HLnGGdubWZr6PLEpEaTEHhRK5cLWDyyv1s+/EifTv4s2Tc7dRx1xwRImJbCgonsWLXaZ5ZcxiA/w5tzcyITg6uSERqCwVFNXc8NYNHP9xP4oXiC9bv/bkHYR0DHVyViNQmCopq7M2vfuSVTT8AMDAkkJdG/IGA+nq6WkTsS0FRTa3YmVQSEjtnhNGkgR6cExHHcLHlyrdu3cqgQYMIDw9nyZIl1yzPzMzkkUceYdiwYURERPDxxx/bshyn8f7Pw4E3b+jFsecGKyRExKFsdkRhsVh47rnnWLZsGYGBgYwaNYqwsDDatWtX0ubDDz+kbdu2LFq0iMuXLzN48GAiIyPx8PCwVVnVmmEYzP7sCP+z8zQtG3nzwYQ78PLQXU0i4lg2C4qEhARatmxJ8+bNAYiIiGDLli2lgsJkMpGdnY1hGGRnZ9OgQQPc3Grn2bCUyzk8vGIfR89n0K+DP28/0B1vj9rZFyJSvdhsT2Q2mwkKCip5HRgYSEJCQqk2999/P5MmTSI0NJTs7Gxee+01XFyuPRsWGxtLbGwsAGlpabYq2WGOp2bwp3d2k56Tz5ODb2FSv7YahkNEqg2bBYVhGNe89/ud3/bt2wkJCWH58uUkJyfzl7/8hR49elC3bt1S7WJiYoiJiQEgOjraViU7xK7ES/z38r3kFRSx7C+96NfB39EliYiUYrOL2UFBQaSmppa8NpvNBAQElGoTFxfHPffcg8lkomXLljRr1ozExERblVTtfHnUzJgluzABsQ/3VkiISLVks6Do0qULSUlJpKSkkJ+fz7p16wgLCyvVpkmTJuzcuROAixcvcurUKZo1a2arkqqVfafTeHjFXvy83dn6ZH+6tfBzdEkiItdls1NPbm5uzJo1i4kTJ2KxWBg5ciTt27dn1apVAIwdO5ZHH32UGTNmEBkZiWEYTJ06lYYNG9qqpGpjydaTvLT+OCYTvPfnnvh61867vETEOZiM611MqMaio6OJi4tzdBlVFv/DBca/tweAjyfdye0ta34wiojj3ci+U/df2tEnB87wj9jv8HJ3Ze2UuzQLnYg4BQWFnSz8+gQvf/E9Dbzc+fSxu2jV2MfRJYmIVIiCwg7+k3SZl7/4npaNvFk7pQ/167g7uiQRkQqz6VhPArkFFp743+8AmD/yVoWEiDgdBYUNGYbB5JX7Sb6cw8whIdzRppGjSxIRqTQFhQ39v9UJbD72EwNDAvnvvm0cXY6ISJUoKGzklS++Z/W+M9T1dGPRA90dXY6ISJUpKGzgw92nefPrEwD8Z+ZA3FzVzSLivCq8B8vJybFlHTVGyuUcZn5ymGBfL3Y/NUDzSYiI07MaFPv372fIkCEMGTIEgOPHjzN79mxb1+W0nvrkEAAvjPgDgZrfWkRqAKtBMXfuXN599118fX0B6NixI3v37rV1XU7pX5t/YNuPFxl1ezP63xJg/QMiIk6gQqeemjRpUvpD15lcqLbb9uMF/rX5RwCeGhLi4GpERG4eq09mN2nShP3792MymcjPz2fFihW0bdvWHrU5jS+OpPLwin14uLqw/m+hNPTRaLAiUnNYPTSYPXs2H374IWazmX79+nHs2DGeffZZe9TmFA6fvcKkD/bhX8+TzY/3o11AX
esfEhFxIlaPKE6dOsWCBQtKvbdv3z5uv/12mxXlLIqKDO5bvJMiA2If6k2LRt6OLklE5KazekTxwgsvVOi92mjp9kRy8i083K8Nbfx1JCEiNVOZRxQHDhzgwIEDXL58mWXLlpW8n5WVhcVisUtx1VlugYWX1h/H3dXEPwZ2cHQ5IiI2U2ZQFBQUkJOTg8ViITs7u+T9unXr8sYbb9iluOrstc0/ADBraCfquOuhOhGpucoMil69etGrVy9GjBhBcHCwPWuq9naevMTi+ES6tfDl/jtaOrocERGbsnox28vLi3nz5nHixAny8vJK3l++fLlNC6uuLEUGf489gMkEC//UHRcXk6NLEhGxKasXs6dOnUqbNm04c+YMkydPJjg4mC5dutijtmrpnW2JmDPymHBXa5r6ejm6HBERm7MaFOnp6YwePRo3Nzd69erF3Llz+e677+xRW7VzIDmNf244TuvGPky7t6OjyxERsQurp57c3IqbBAQE8M033xAQEEBqaqrNC6tuDMMomdJ0xYReuGvocBGpJawGxaRJk8jMzGTatGk8//zzZGdn89RTT9mjtmrlX5t/JPFiNhP6tKaZnx6sE5Haw2pQ9O/fH4B69eqxYsUKoPjJ7Nrkar6F17cUD/g3Q6ecRKSWKTMoLBYLGzZswGw2ExoaSocOHfj6669ZvHgxubm5rFmzxo5lOtai+JMATO7fTrPViUitU2ZQzJw5k/Pnz3PrrbfywgsvEBwczIEDB5g6dSoDBw60Z40OlZ1XyIpdpwH428D2Dq5GRMT+ygyKw4cP89lnn+Hi4kJeXh69e/dm06ZN+Pv727M+h9t0NJXL2fm8FnObLmCLSK1U5p7P3d29ZIIiT09PWrVqVetCAmDZt0nUq+NG1G16Ol1EaqcyjygSExOJjIwseZ2cnFzq9dq1a21bWTWwO/ESCWeuENOjuZ7AFpFaq8ygWL9+vT3rqJYWbCoe+O+JQRodVkRqrzKDorYPBFhoKWJP0mW6t/AloF4dR5cjIuIwNr06u3XrVgYNGkR4eDhLliy5bpvdu3cTFRVFREQEDzzwgC3LqZQPdycDcO8fmji4EhERx7L6wF1VWSwWnnvuOZYtW0ZgYCCjRo0iLCyMdu3albTJyMhgzpw5LF26lKZNm3Lp0iVblVMphmHw5tcnaOjjwfg/tnJ0OSIiDlWhI4rc3FwSExMrteKEhARatmxJ8+bN8fDwICIigi1btpRqs3btWsLDw2natCkAjRo1qtQ2bOWb7y9wITOPEd2C8XDTLbEiUrtZ3Qt+9dVXREVFMXHiRACOHTvGI488YnXFZrOZoKCgkteBgYGYzeZSbZKSksjIyGDcuHFER0dXm6e9V+0pPu30cN82Dq5ERMTxrJ56evPNN1m9ejXjxo0DICQkhLNnz1pdsWEY17xnMpW+xdRisXDkyBHef/99cnNzGTNmDLfddhutW7cu1S42NpbY2FgA0tLSrG77Ru04eYlgXy8C6usitoiI1aBwdXWlXr16lV5xUFBQqeHIzWYzAQEB17Tx8/PD29sbb29vevTowfHjx68JipiYGGJiYgCIjo6udC2V8VNGLll5hUR00UVsERGowKmn9u3bs3btWiwWC0lJSTz//PN069bN6oq7dOlCUlISKSkp5Ofns27dOsLCwkq1GTBgAHv37qWwsJCrV6+SkJBA27Ztq/5tboI5a48CMKJ77b49WETkF1aPKJ555hkWLVqEh4cHTzzxBH369OHRRx+1vmI3N2bNmsXEiROxWCyMHDmS9u3bs2rVKgDGjh1L27ZtCQ0NZdiwYbi4uDBq1Cg6dHDcw225BRbWHToPQO821ePCuoiIo5mM611M+I2jR4/SqVMne9VjVXR0NHFxcTZZ91vfnGD+xu95akhHHurr2CMbEZGb6Ub2nVaPKObOncuFCxcYPHgwERERtG9fM4faNgyD5TuKhxOf2Ed3O4mI/MJqUKxYsYILFy6wYcMGnnnmGbKzs7n33
nsrdPrJmRxPzSQ1I5fH+rfVAIAiIr9RoafJ/P39efDBB5kzZw4dO3bkrbfesnVddnfipyxA1yZERH7P6hHFyZMnWb9+PV988QW+vr4MGTKE6dOn26M2u9r+40UAWjT0dnAlIiLVi9WgmDFjBhEREbz77rsEBgbaoyaH+PZkcVA091NQiIj8ltWg+N///V971OFQ569c5UzaVcb01ARFIiK/V2ZQ/O1vf+P1118vNavdb9WkGe6+PFo8BtUDvVs6uBIRkeqnzKCYOXMmAIsWLbJbMY7yfWom3h6udG5a39GliIhUO2Xe9fTLuEwrV64kODi41J+VK1farUB7OJt+lcZ1Pa8ZtFBERCpwe+yOHTuueW/r1q02KcYRruZb+Ob7C7QPqOvoUkREqqUyTz2tXLmSVatWkZKSUuo6RXZ2Nt27d7dLcfbw7Yniu506BzdwcCUiItVTmUERGRlJ3759efXVV3niiSdK3vfx8cHX19cetdnF+p8HAXzwTl3IFhG5njKDwmQy0axZM2bNmnXNsvT09BoTFtn5hbiYoHFdT0eXIiJSLZUZFE888QSLFy8mOjoak8lUasY6k8l0zfzXzup4aiY9WjV0dBkiItVWmUGxePFioHjO7JoqO6+Q05dyGNQ5yHpjEZFayupdT/v27SMnJweATz/9lLlz53Lu3DmbF2YPCWeuABDSpPJTvYqI1BZWg2L27Nl4eXlx/Phxli5dStOmTXnyySftUZvN7fx5fKfWjXVrrIhIWawGhZubGyaTic2bN/Pggw8yfvx4srOz7VGbze1PTgegY5COKEREymI1KHx8fFi8eDGfffYZd999NxaLhcLCQnvUZnMHU9Lx8XCljruro0sREam2rAbFa6+9hoeHBy+99BL+/v6YzWYmTJhgj9psKi07n6y8Qrq39HN0KSIi1ZrVoPD39ycyMpLMzEy+/vprPD09GT58uB1Ks63dpy4BENGliYMrERGp3qwGxfr16xk9ejQbN25kw4YNJT87u0Nni+94Gta1qYMrERGp3qxOXLRo0SJWr15No0bFc0lfvnyZP//5zwwePNjmxdnSsfOZ+Hm74+1htQtERGo1q0cUhmGUhASAr69vqae0ndXhs1fw8VRIiIhYY3VP2adPHyZMmEBERARQfCqqb9++Ni/MloqKDH7KzKNbC19HlyIiUu1ZDYpp06axadMm9u3bh2EYxMTEEB4ebo/abCb5cvGT5l00tLiIiFVlBkVSUhLz5s0jJSWFDh06MG3aNAIDA+1Zm81sOpoKwPBuwQ6uRESk+ivzGsVTTz1F//79eeONN+jcuTPPP/+8PeuyqZW7kwHo1tzXsYWIiDiBMo8osrOzue+++wBo06YNI0aMsFtRtpZ0KYdGPh6aI1tEpALKDIq8vDyOHj1acodTbm5uqdedO3e2T4U3WXpOPgB3tNEcFCIiFVFmUPj7+zN37tyS140bNy55bTKZWL58ue2rs4GP/pMCwICONeN6i4iIrZUZFCtWrLBnHXazak/x9YmIWzV0h4hIRVh94K6mSb2SS+O6nhoxVkSkgmwaFFu3bmXQoEGEh4ezZMmSMtslJCQQEhJi8zGkDMMgr7BI1ydERCrBZkFhsVh47rnnWLp0KevWrePzzz/nxIkT1233yiuv0KdPH1uVUmL3qcuAHrQTEamMCo319Omnn/Lmm28CcO7cORISEqyuOCEhgZYtW9K8eXM8PDyIiIhgy5Yt17RbsWIFgwYNKjWelK2s/a54ru+BIQE235aISE1RoTmzDx48yLp164DiGe/mzJljdcVms5mgoKCS14GBgZjN5mvabN68mTFjxpS7rtjYWKKjo4mOjiYtLc3qtsvyozkLgHYBmvpURKSirAZFQkICzz77LJ6engA0aNCAgoICqyu+3gizv3/A7cUXX2Tq1Km4upZ/YTkmJoa4uDji4uLw86v6jHR7ki7TprFPlT8vIlIbWR0U0M3NDYvFUrKTv3z5Mi4u1i9tBAUFk
ZqaWvLabDYTEFD6lM/hw4d5/PHHAUhLSyM+Ph43NzcGDhxYqS9REd+nZgLQo5WmPhURqQyrQTFu3Dgee+wxLl26xGuvvcbGjRv5+9//bnXFXbp0ISkpiZSUFAIDA1m3bh0LFiwo1earr74q+Xn69OncfffdNgkJgDUHzwIwsnszm6xfRKSmshoUw4YNo3PnzuzatQvDMHjrrbdo27at9RW7uTFr1iwmTpyIxWJh5MiRtG/fnlWrVgEwduzYG6++ElbvOwNAr9a6NVZEpDKsBsW5c+fw8vKif//+pd5r2tT6XNP9+vWjX79+pd4rKyD++c9/Wl3fjfBwdSGwvqcGAhQRqSSrQfHwww+X/JyXl8eZM2do3bp1yV1QzuJs+lXG9mrh6DJERJyO1aBYu3ZtqddHjhwhNjbWZgXZwi8jxnq46mhCRKSyKv1kdufOnTl06JAtarGZfaeLn71oF1DXwZWIiDgfq0cUy5YtK/m5qKiIo0eP0rChc10QzsorBOA2zWgnIlJpVoMiOzu75GdXV1f69evHoEGDbFrUzZaZWxwUDX08HFyJiIjzKTcoLBYL2dnZTJs2zV712MTBlHQAGtf1dGwhIiJOqMxrFIWFhbi6unL06FF71mMTJy8Uj/GkOShERCqvzCOK0aNH88knnxASEsIjjzzC4MGD8fb2Lll+zz332KXAm+GEOYsWDb2tNxQRkWtYvUZx5coV/Pz82L17d6n3nSUosvIKycwrpO8t/o4uRUTEKZUZFJcuXWLZsmW0b98ek8lUajRYZ3q6+fj5DAD+2Nb2812IiNREZQZFUVFRqTuenFX8DxcA6NSkvoMrERFxTmUGhb+/P5MnT7ZnLTbxU0YeAN1aaHhxEZGqKPOup+tNPOSMTl3MplUjXcgWEamqMoPi/ffft2MZtnMpOw8XJ7qmIiJS3ZQZFL6+vnYswzZyCywkX86hbwfd8SQiUlWVHhTQmew4eZECi8GduuNJRKTKanRQrD9UPGd37zYKChGRqqrRQXEgOQ1XFxMNvNwdXYqIiNOq0UFx5Wohzf28HF2GiIhTq7FBUWAp4mJWHu0C6jm6FBERp1Zjg+LTg+cAiOra1MGViIg4txobFBsPnwcgoksTB1ciIuLcamxQ7Dh5CQ83F1xc9LCdiMiNsDrMuLPy8XTDw7XG5qCIiN3UyD1pek4+FzLzGNEt2NGliIg4vRoZFLtPXQbgD8EaWlxE5EbVyKBYve8MAL1a64lsEZEbVSOD4mJW8RwUDX08HFyJiIjzq3FBYRgGB5LTCdGMdiIiN0WNC4qDKekA3BrcwLGFiIjUEDUuKH65PjExtLWDKxERqRlqXFAcPZ8BQLuAug6uRESkZrBpUGzdupVBgwYRHh7OkiVLrln+2WefERkZSWRkJGPGjOH48eM3tL1CSxEHktMJbd8Yk6Y/FRG5KWwWFBaLheeee46lS5eybt06Pv/8c06cOFGqTbNmzfjggw9Yu3YtkyZN4plnnrmhbe5KLH5+IrR94xtaj4iI/MpmQZGQkEDLli1p3rw5Hh4eREREsGXLllJtunfvToMGxRedu3btSmpq6g1t87sz6QCEttcc2SIiN4vNxnoym80EBQWVvA4MDCQhIaHM9qtXr6Zv377XXRYbG0tsbCwAaWlpZa4jO68QgNaNfapSsoiIXIfNgsIwjGveK+u6wa5du1i9ejUrV6687vKYmBhiYmIAiI6OLnObqRm5+Hq7U8fdtQoVi4jI9dgsKIKCgkqdSjKbzQQEBFzT7vjx4zz99NO88847+Pn53dA2951Oo5mmPhURualsdo2iS5cuJCUlkZKSQn5+PuvWrSMsLKxUm3PnzjFlyhTmz59P69Y39tyDpcjgclY+3u41duR0ERGHsNle1c3NjVmzZjFx4kQsFgsjR46kffv2rFq1CoCxY8eycOFC0tPTmTNnDgCurq7ExcVVaXvHzmeQmVdIpKY+FRG5qWz663e/fv3o169fqffGjh1b8vOLL
77Iiy++eFO29eVRMwBhHa89vSUiIlVXI57MLioy+PdXP/KH4Po0bVDH0eWIiNQoNSIofvwpiyIDwm4J0BPZIiI3WY0Iim0/XgDgbp12EhG56WpEUFzMygegi4YWFxG56WpEUKReuUoDL3fcXWvE1xERqVacfs9qGAabj/2kYTtERGzE6YPi/JVcsvIK6d7ixp7qFhGR63P6oDibfhWA25rr+oSIiC04fVCs3J0MwJ1tGjm4EhGRmsnpg2LHyYt4e7gSUF8P2omI2IJTB0VugQVzRh69dTQhImIzTh0Um48Vj+80IEQP2omI2IpTB8XB5HQA7ukUVH5DERGpMqcOikNnrwDgX8/TwZWIiNRcThsUuQUWdp+6zACN7yQiYlNOGxRfHCmeZvXeLk0cXImISM3mtEGx8XBxUAzpousTIiK25LRBceRcBm0a++DtoTmyRURsySmDoqjI4ExaDn9sp+cnRERszSmDYlfiJYoMuLWZr6NLERGp8ZwyKOJ/ntHurnaNHVyJiEjN55RBkXghG4BgXy8HVyIiUvM5ZVCcvpSNj4ero8sQEakVnDIoLmTmEajRYkVE7MIpgyItp0BTn4qI2InTBUVhkQFofCcREXtxuqDIzC0E4O5bNMaTiIg9OF1Q5BZYALijdUMHVyIiUjs4XVDkFxYB4Ofj4eBKRERqB6cLiqy8QiI0YqyIiN04XVAUGQa3t/RzdBkiIrWG0wUFoGcoRETsyCmDQs9QiIjYj02DYuvWrQwaNIjw8HCWLFlyzXLDMHjhhRcIDw8nMjKSI0eOVGi9LRp53+xSRUSkDDYLCovFwnPPPcfSpUtZt24dn3/+OSdOnCjVZuvWrSQlJbFp0yaef/55Zs+eXaF1a5wnERH7sVlQJCQk0LJlS5o3b46HhwcRERFs2bKlVJstW7YwfPhwTCYTXbt2JSMjg59++qn8gk0mTCaTrcoWEZHfsdk8omazmaCgX+ezDgwMJCEhodw2QUFBmM1mAgJKP3UdGxtLbGwsAO5ZqURHR9uqbKeSlpaGn5/uAAP1xW+pL36lvvjVqVOnqvxZmwWFYRjXvPf7I4GKtAGIiYkhJiYGgOjoaOLi4m5Slc5NffEr9cWv1Be/Ul/86kZ+wbbZqaegoCBSU1NLXl/vSOH3bVJTU69pIyIijmWzoOjSpQtJSUmkpKSQn5/PunXrCAsLK9UmLCyMNWvWYBgGBw8epF69egoKEZFqxmanntzc3Jg1axYTJ07EYrEwcuRI2rdvz6pVqwAYO3Ys/fr1Iz4+nvDwcLy8vHjppZesrveXU1Civvgt9cWv1Be/Ul/86kb6wmRc70KBiIjIz5zyyWwREbEfBYWIiJSr2gaFrYb/cEbW+uKzzz4jMjKSyMhIxowZw/Hjxx1QpX1Y64tfJCQkEBISwsaNG+1YnX1VpC92795NVFQUERERPPDAA3au0H6s9UVmZiaPPPIIw4YNIyIigo8//tgBVdrejBkzuPPOOxk6dOh1l1d5v2lUQ4WFhcaAAQOM5ORkIy8vz4iMjDR+/PHHUm2++eYbY8KECUZRUZFx4MABY9SoUQ6q1rYq0hf79u0z0tPTDcMo7pfa3Be/tBs3bpwxceJEY8OGDQ6o1PYq0hdXrlwx7r33XuPs2bOGYRjGxYsXHVGqzVWkL95++21j/vz5hmEYxqVLl4yePXsaeXl5jijXpvbs2WMcPnzYiIiIuO7yqu43q+URha2G/3BGFemL7t2706BBAwC6du1a6tmUmqQifQGwYsUKBg0aRKNGjRxQpX1UpC/Wrl1LeHg4TZs2Baix/VGRvjCZTGRnZ2MYBtnZ2TRo0AA3N5vd9OkwPXv2LNkXXE9V95vVMiiuN/yH2Wwut80vw3/UNBXpi99avXo1ffv2tUdpdlfRfxebN29mzJgx9i7PrirSF0lJSWRkZDBu3Diio6NZs2aNnau0j4r0xf3338/JkycJDQ1l2LBhzJw5ExeXarn7s6mq7jerZaQaN3H4D
2dXme+5a9cuVq9ezcqVK21dlkNUpC9efPFFpk6diqtrzR5huCJ9YbFYOHLkCO+//z65ubmMGTOG2267jdatW9urTLuoSF9s376dkJAQli9fTnJyMn/5y1/o0aMHdevWtVeZ1UJV95vVMig0/MevKtIXAMePH+fpp5/mnXfeqbGDoFWkLw4fPszjjz8OFA8IFx8fj5ubGwMHDrRrrbZW0f8jfn5+eHt74+3tTY8ePTh+/HiNC4qK9EVcXBwPPfQQJpOJli1b0qxZMxITE7n11lvtXa5DVXW/WS2PvTT8x68q0hfnzp1jypQpzJ8/v8btBH6rIn3x1VdflfwZNGgQzz77bI0LCahYXwwYMIC9e/dSWFjI1atXSUhIoG3btg6q2HYq0hdNmjRh586dAFy8eJFTp07RrFkzR5TrUFXdb1bLIwpbDf/hjCrSFwsXLiQ9PZ05c+YA4OrqWiNHzKxIX9QWFemLtm3blpyTd3FxYdSoUXTo0MHBld98FemLRx99lBkzZhAZGYlhGEydOpWGDRs6uPKb7/HHH2fPnj2kpaXRt29fpkyZQmFhIXBj+00N4SEiIuWqlqeeRESk+lBQiIhIuRQUIiJSLgWFiIiUS0EhIiLlUlBItRQSEkJUVFTJnzNnzpTZtlu3bje8venTpxMWFkZUVBQjRozgwIEDlV7HzJkzOXHiBACLFi0qtexmDSnyS78MHTqURx55hIyMjHLbHzt2jPj4+Juybam9dHusVEvdunWr8M66Mm3LMn36dO6++24GDx7M9u3bmTdvHmvXrq3y+m5GTdbWO23aNFq1asWkSZPKbB8XF8fhw4eZNWvWTa9Fag8dUYhTyM7OZvz48YwYMYLIyEg2b958TZuffvqJ+++/v+Q37r179wLF4/zExMQwYsQI/vrXv5KdnV3utnr27ElycjIAy5YtY+jQoQwdOpT3338fgJycHB566CGGDRvG0KFDWb9+PQDjxo3j0KFDvPLKK+Tm5hIVFcUTTzwB/HrU8/e//73Ub/jTp0/niy++wGKxMG/ePEaOHElkZCQfffSR1T7p2rVryYBuCQkJjBkzhuHDhzNmzBgSExPJz8/njTfeYP369URFRbF+/XpycnKYMWMGI0eOZPjw4dftR5Fr3NDg5yI20rFjR2PYsGHGsGHDjEcffdQoKCgwMjMzDcMonk9g4MCBRlFRkWEYhtG1a1fDMAzj3XffNd566y3DMIrnKMjMzDQuXbpk/OlPfzKys7MNwzCMxYsXG//+97+v2d60adNK5q5Yv369MWrUKOPQoUPG0KFDjezsbCMrK8sYMmSIceTIEWPjxo3GzJkzSz6bkZFhGIZhPPDAA0ZCQkKpmn7xy+tNmzYZTz75pGEYhpGXl2f07dvXuHr1qvHRRx8ZCxcuLHl/xIgRRnJy8jV1/rKewsJCY8qUKUZ8fLxhGIaRmZlpFBQUGIZhGN9++60xefJkwzAM4+OPPzbmzJlT8vkFCxYYa9asMQyjeL6Ke+65p6RvRMpSLYfwEKlTpw6ffvppyeuCggJeffVV/vOf/+Di4oLZbObixYv4+/uXtOnSpQtPPfUUhYWFDBw4kJCQEL7++mtOnDhRMrxHQUEBXbt2ve4258+fz9tvv03Dhg158cUX2blzJwMHDsTb2xuA8PBw9u7dS2hoKPPmzePll1+mf//+9OjRo8Lfq2/fvrzwwgvk5+ezdetWevToQZ06dfj222/5/vvv+eKLL4DiGdlOnz5N8+bNS33+lyOVs2fP0rlzZ+66666S9tOmTeP06dOYTCYKCgquu/3t27fz1Vdf8d577wGQl5fH+fPna+QYUHLzKCjEKaxdu5bLly8TFxeHu7s7YWFh5OXllWrTs2dPPvjgA+Lj43nyySeZMGEC9evX56677uLVV1+1uo0nn3ySwYMHl7zesWPHddu1bt2auLg44uPjWbBgAXfddReTJ0+u0Pfw9PSkV69ebNu2jQ0bNhAREQEUD//89NNPExoaW
u7nfwnQzMxMHn74YT788EMefPBBXn/9de644w4WLlzImTNnePDBB8tcxxtvvEGbNm0qVK8I6BqFOInMzEwaNWqEu7s7u3bt4uzZs9e0OXv2LI0aNeK+++5j5MiRHDlyhK5du7J//35Onz4NwNWrVzl16lSFttmzZ082b97M1atXycnJYfPmzfTo0QOz2YyXlxdRUVFMmDCBo0ePXvNZNze3Mn+rj4iIIC4ujr1799KnTx8A+vTpw6pVq0o+c+rUKXJycsqsrV69ejz99NO89957FBQUkJmZSWBgIACffPJJSTsfH59S12T69OnDBx98UDIvwfVqF/k9HVGIU4iMjGTSpElER0cTEhJy3d+I9+zZw7vvvoubmxve3t7MmzePhg0bMnfuXB5//HHy8/OB4gvKFRmOvXPnzkRHRzN69GgARo0aRadOndi2bRvz58/HxcUFNzc3Zs+efc1n77vvPoYNG0anTp1YsGBBqWV33XUX06ZNIywsDA8PDwBGjx7N2bNniY6OxjAM/Pz8eOutt8qtr1OnTnTs2JF169YxceJEpk+fzrJly+jdu3dJmzvuuIMlS5YQFRXFww8/zKOPPspLL73EsGHDMAyD4OBgFi9ebLUvpHbT7bEiIlIunXoSEZFyKShERKRcCgoRESmXgkJERMqloBARkXIpKEREpFwKChERKdf/BySMCvqgAUcIAAAAAElFTkSuQmCC", "text/plain": [ - "Filter (num_proc=16): 0%| | 0/33558 [00:00" ] }, "metadata": {}, "output_type": "display_data" }, { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "26cb17f7d5b7440192ed7ada0070fa7d", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Filter (num_proc=16): 0%| | 0/33558 [00:00:45: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" ] }, @@ -355,55 +764,47 @@ "

\n", " \n", " \n", - " [834/834 02:34, Epoch 1/1]\n", + " [834/834 01:33, Epoch 1/1]\n", "
\n", " \n", " \n", - " \n", + " \n", " \n", " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", "
StepTraining Loss
830.695400
1660.6346001000.658900
2490.5402002000.585400
3320.4148003000.474600
4150.2985004000.346600
4980.1991005000.257400
5810.1332006000.185800
6640.0963007000.134200
7470.078100
8300.0681008000.114500

" @@ -416,77 +817,96 @@ "output_type": "display_data" }, { - "name": "stdout", + "name": "stderr", "output_type": "stream", "text": [ - "****** Validation split: 3/5 ******\n", - "\n" + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-cbfcb02a16dd9d81.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-b151d664d8c68613.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-52266cf801a76344.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-5c7ceff44bad692c.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-81bcbb23e61bfc0c.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-e99a8c7eedd34769.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-6d7d5150907035d9.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-735b525b0abf0f74.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-9a47cf8290cd2f6b.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-56deb15eec02ca33.arrow\n", + "Loading cached processed dataset at 
/n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-2aea162267b33f73.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-3bc7a169c841323d.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-1f67206928846c7a.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-88375062775280fb.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-bb45ebd2db699b53.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-fd6e4344cc2f8033.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-b8a9338cde5e5801.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-c013876f43a71ad7.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-148c328cb89da5c3.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-488b3d116a6d3b19.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-835e3e1538e24397.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-d176e8ab14f1ce28.arrow\n", + "Loading cached processed 
dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-3451fb13f869a5b0.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-56f270f895acc3ff.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-db497551e7a1e808.arrow\n" ] }, { "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "93e9c12bc6e243b39224994add37ce21", - "version_major": 2, - "version_minor": 0 - }, + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYoAAAEWCAYAAAB42tAoAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAAsFUlEQVR4nO3deXxM5x4/8M8sEmKLRBYStURUuIg2lpZYQgQRYUJDW3VbfpTS24trqVpr+dGqWy1FKT+U5l5NLQ2qtqC2IhVC2kaESGSQPZN98vz+COOmySyJzJkMn/fr1dfNzHnmnO88tz2fec7yHJkQQoCIiEgPuaULICKimo1BQUREBjEoiIjIIAYFEREZxKAgIiKDGBRERGQQg4KIiAxSWroAoprMz88PDx8+hEKhgJ2dHXx9fTFv3jzUrVsXAHD58mX8+9//xtWrVyGXy9GlSxfMmDEDrVu31q0jJycHn3/+OX7++WdkZmaicePG6NOnDyZNmgQHBwdLfTUik3FEQWTE+vXrERUVhT179uD69evYuHEjACAqKgrjxo1Dv379cOrUKRw9ehQvvvgiRo8ejcTERABAYWEhxo4di7i4OGzatAmXLl3Cd999B3t7e1y9etWSX4vIZBxREJnIyckJPXv2xI0bNwAAn3zyCYKDgzF27Fhdm3/+85+IiYnBF198gZUrV2Lv3r24d+8etm3bphuFODo64r333rPIdyCqCo4oiEyUkpKCU6dO4YUXXkBeXh6ioqIwcODAcu0GDRqEM2fOAADOnDkDX19fXUgQWSOOKIiMePzrPzc3F927d8f777+PzMxMlJSUwMnJqVx7JycnpKenAwAyMjLQvn17Seslqm4cURAZsXbtWkRFRWH79u2Ij49Heno6GjRoALlcjgcPHpRr/+DBAzRq1AgAYG9vX2EbImvCoCAyUdeuXaFSqbBixQrY2dnB29sbhw4dKtfu4MGD6N69OwDg1VdfxenTp5Gbmyt1uUTVhkFBVAljx47FmTNncOPGDUyfPh179uzBtm3bkJOTg8zMTKxevRq//fYbpkyZAgAIDg6Gq6srpk6dips3b6KkpATp6elYv349IiMjLfxtiEzDoCCqBAcHBwQHB2PdunXw8fHBpk2b8PPPP8PX1xd9+/bFjRs3sHPnTrRo0QIAYGNjg61bt6JVq1Z455138PLLL2PkyJFIT09Hx44dLftliEwk44OLiIjIEI4oiIjIIAYFEREZxKAgIiKDGBRERGSQ1d2Z3a1bN7i5uVm6DCIiq5KUlITz589X6bNWFxRubm4IDw+3dBlERFZFpVJV+bM89ERERAYxKIiIyCAGBRERGcSgICIigxgUR
ERkEIOCiIgMMltQzJkzB6+88gqGDBlS4XIhBJYsWQJ/f38EBQUhJibGXKUQEdFTMNt9FCqVCm+++SZmzZpV4fKTJ08iISEBhw8fxpUrV7Bw4UL897//NVc5REQVMjSBtr5F+j5hcF2V3kbl6zIXswVFly5dcPfuXb3Ljx49imHDhkEmk8Hb2xtZWVm4f/8+nJ2dzVUS0VMRQqBQW4KC4hJk5xcjO78IhcUlKCwuwcOcAshkMhQUlyAlMw/yR3/feqhBPVslCh61i3+YA8e6NhACKBECJY/+F3j0uqT0f4Uo3VH8npINlwa1dcsFSncS4tFnBUo/87g+3XviSTvx6LMZuUWopZBBIZdV8N30fGe9nWGgn/QsNLRzq+yO19B+kg9OqNhLT/FZi92ZrVar4erqqnvt6uoKtVpdYVCEhYUhLCwMAHQPrSeqKvFop5mZV4QHOQUo1grkF2txLyMfchlwLzMfD3IKcC8jD2m5RbidqoFCJkOqprDK22xczxa2SjlqKWT4U52DFo3tIJfJIJPJIAMglwFymezRe4BcDshlcrRr2gDZ+cVo7mgHGUqXyWQyyGV49LnSP2R49N6j9Txu97iN7FE2pGoK4W5fp+Iiy+fHo7crXiDT097Aqox8pnLbMbAqvR+ydF16P2OogMquS08FP/9R6U3oWCwoKvqloK+zQkNDERoaCuDpbkOn50N+kRbJGXm4mpSJqDsZiE3JQlJGHlIy86GQl/7SN/ars76tEg3taqGguARerg2gkMvQsnFd1LFRwLm+LerYKNCgdi3UqaWAjVIOhVwGOxsF6tdWwlapQB0bBerZKmGrlFdpJ0BU3X5eU/XPWiwoXF1dkZKSonudkpLCw05UKUII3E3Pw5/3s/Fj9D3cfKDBlcSMcu1slXI41beFT3MHNKxTC21c6qF+7VpwrGcDmQxwrl8bdWwUsFHIYW9XCw3q1EKD2rWk/0JENZTFgsLPzw87duxAYGAgrly5gvr16zMoyKAriRn4f2cTEHc/BwAQfTezzHKn+rYY3MEVNgo5Orrbo4N7Q7Rr0gB1ba1u7kuiGsVs/wVNmzYNFy5cQHp6Onr16oWpU6eiuLgYADB69Gj07t0bkZGR8Pf3R506dbBs2TJzlUJWKDEtFyf+eIATsfcR/1CDWw81umU2SjlecLDDiJfd4dLAFt7NGqHzC/ZoXM/WghUTPbvMFhSfffaZweUymQwLFiww1+bJiuQXafFrQhrOx6fhXmY+zt9Kxd30PN3ypg1ro0uLRvBq0gATe3vATd/JWCIyC47JySIeZBfgs59/x+8p2bh8J6PMMl/PxlB1dkNHd3v0ftEJtRScQIDIkhgUJJmHOQX44XISfohKwvV7Wbr3B/3NFZ2a2WNAOxe0bFyXVwkR1TAMCjKLwuISHL2hxo2UbJz4/X6ZE89u9nUwvLMb/Nu5YHCHJhaskohMwaCgaiOEwIGrKfghKgnn4lORU1B68YJLA1t0amaPdk0aYEjHJujRurGFKyWiymBQ0FPRlggcjknB3t+ScSjmyX0xr3o4YuyrLdC7jRNq11JYsEIieloMCqq0gmItDly9hzNxqdh7JRmFxaWTDfk/Oscwxa81b1gjeoYwKMhk+68kY9OpeFz5y/mGkJfd8U6PFrC3s7FgdURkLgwK0qtYW4KtZxJw+U46jsc+QF6RFgAwoJ0LBrR3hb+XCxraceRA9KxjUFA515Oz8M0vt7D70pNp4tu41MOrHo0xc+CLsLPhvzZEzxP+F0/IK9QiKjEd4ZeT8PN1NTLzigAAHd0bQtXZDWNeaVHhMwyI6PnAoHhO/aHOxv4ryYi4eg/xDzRllo3r2RJvdHsBrZzqWag6IqpJGBTPESEELtxKw/rImzj++wMAwAsOdgj2booeHo3h5+XMifWIqBwGxXMi6k46Xv/6vO6EdNcWDpgb6IVOzewtWxgR1XgMimdcckYevjweh53n78BWKceY7s3xT/82cKjLS1mJyDQMi
meQEAKX72Rg/t5riEkunXzv5eaN8O9QbzRzsLNwdURkbRgUzxBNQTE2n76Fr07cRF6RFrZKOfq1dcbUfp7o5N6Qs7ISUZUwKJ4RB67ew+RvLwMA5DJA9ZIb5g9px7ulieipMSis2IPsApyNT8XO87dxLj4NAPDF6M4Y0rEJRw9EVG0YFFbozM2H+L8HY3XPeLBVyjGqSzPMCHiRl7cSUbVjUFiRA1fvYd6ea0jVFAIAVJ3dMKC9K3w9G6OuLf+vJCLz4N7FCuQUFGNpxHXsupAIOxsFurV0wNLhHdDamXdOE5H5MShqMCEEvr+chBn/vQIAaOtaH3ve68EHARGRpBgUNVCaphD7ryRjwb4YAEB9WyUm9m6FSX1ac3I+IpIcg6IG0ZYIfHUiDp8e/gNA6UnqcT1b4v1+nhxFEJHFMChqgPwiLb48FoevT8WjoLgEnV+wxwTfVujb1pkBQUQWx6CwICEE9l1Jxr/+G41Cbelzp/+Pb0vMGeQFOQ8xEVENwaCwgIJiLZb8eAPbz93WvbdseAeM9HFHLYXcgpUREZXHoJDYqT8f4MMfriIxLQ9dWzogoL0rRrzkzmdPE1GNxaCQSEmJwMzvo3XPoV4Z0hGvdWlm4aqIiIxjUJhZfpEW607cxPoTN1GoLYFjXRt88XpnvOrR2NKlERGZhEFhRnuikvBB2G8AgM4v2OPvr7ZAsLebZYsiIqokBoUZnP7zIebtvYZbDzVo4WiH6QNeRFCnppYui4ioSswaFCdPnsTSpUtRUlKCkSNHYsKECWWWZ2dn41//+heSk5Oh1WrxzjvvICQkxJwlmVVqTgFe//o8fldnAwAa17PFvqk90aA2T1QTkfUyW1BotVosXrwYW7ZsgYuLC0aMGAE/Pz+0bt1a1+bbb7+Fh4cH1q9fj7S0NAwcOBBBQUGwsbG+h+2cj0/FpG8vI01TiAHtXDBvSDs+dpSInglmC4ro6Gg0b94czZqVXtkTGBiIo0ePlgkKmUwGjUYDIQQ0Gg0aNmwIpdK6joYJIbDi0O9YH3kTALBxzMsY0N7VwlUREVUfs+2V1Wo1XF2f7DBdXFwQHR1dps0bb7yBSZMmwdfXFxqNBqtXr4ZcXv6Gs7CwMISFhQEA0tPTzVVypQghcOhaCpYdvIHEtDz0aO2IfwW0hXcze0uXRkRUrcwWFEKIcu/99fGcp0+fhpeXF7Zt24Y7d+7g7bffho+PD+rVK/uchdDQUISGhgIAVCqVuUo22f3sfIzeeA43H2gAABN7tcLsQW35+FEieiaZLShcXV2RkpKie61Wq+Hs7FymTXh4OCZMmACZTIbmzZvD3d0d8fHx6Nixo7nKemo3H+Sg36pIAIB/Oxeseq0TT1YT0TPNbBMLdejQAQkJCUhMTERhYSEiIiLg5+dXpk2TJk1w9uxZAMDDhw9x69YtuLu7m6ukp7bj3G0M+vcpAMDKER3x9Vs+DAkieuaZbUShVCoxf/58jB8/HlqtFiEhIfD09MSuXbsAAKNHj8bkyZMxZ84cBAUFQQiBGTNmwMHBwVwlPZUtv9zCov3XUddGgf++2wOdeC6CiJ4TMlHRyYQaTKVSITw8XNJtXr2biaAvT8NWKceZ2X5wrGcr6faJiJ7W0+w7Oae1EYlpuZj07SUAwJ73ejAkiOi5Y103LUgsO78IfT49AW2JwMqQjvBq0sDSJRERSY4jCj0KirUYuf4stCUCy4Z34JTgRPTc4oiiApl5RXhl+VHkFmoxsVcrvN7tBUuXRERkMQyKv9AUFGPk+jPILdRiUh8PzAx40dIlERFZFIPifzzMKcCYzRfwhzoH7/X1wL8C2lq6JCIii2NQPJKZV4Rha3/B3fQ8LAhqh7d7tLR0SURENQKDAqUnroc/Cgk+y5qIqCyTr3rKzc01Zx0Wk1+khWrdGcQ/1GDGgDYMCSKivzAaFJcvX8bgwYMxePBgAEBsbCwWLlxo7roks3BfDGKSs/BeXw9M8fO0dDlERDWO0aBYv
nw5Nm/eDHt7ewBA27ZtcfHiRXPXJYnjsffx3a+JAMAT10REeph06KlJkyZlP1TBw4Wszc0HOXh7668AgO8nvWrhaoiIai6jJ7ObNGmCy5cvQyaTobCwENu3b4eHh4cUtZnVxO2l8zeFTeiOl5s3snA1REQ1l9GhwcKFC/Htt99CrVajd+/euHHjBhYsWCBFbWZz8o8HiLufg35tndGtlaOlyyEiqtGMjihu3bqFVatWlXnv0qVLePnll81WlDlpCorxzqNDTrMH8bwEEZExRkcUS5YsMek9azFzdzSKSwSm9G0NT5f6li6HiKjG0zuiiIqKQlRUFNLS0rBlyxbd+zk5OdBqtZIUV93WHo9DxNV7GPQ3V8zgHE5ERCbRGxRFRUXIzc2FVquFRqPRvV+vXj2sWbNGkuKq0930XHzy0+9oWKcW/j3K29LlEBFZDb1B0bVrV3Tt2hXDhw+Hm5ublDWZxfy9MQCA1aGdYKtUWLgaIiLrYfRkdp06dbBixQrExcWhoKBA9/62bdvMWlh1upuei2Ox99GuSQP4tXWxdDlERFbF6MnsGTNmoFWrVrh79y6mTJkCNzc3dOjQQYraqs1/Ht19/U//NhauhIjI+hgNioyMDIwcORJKpRJdu3bF8uXLceXKFSlqqxYlJQIbT8VDIZehX1tnS5dDRGR1jB56UipLmzg7O+PEiRNwdnZGSkqK2QurLmuO/Yn8ohIM7+wGuVxm6XKIiKyO0aCYNGkSsrOzMWvWLHz88cfQaDT48MMPpajtqeUVavHFsTgAwMKg9hauhojIOhkNir59+wIA6tevj+3btwMovTPbGnz36x1oSwRWh3ZCQ7tali6HiMgq6Q0KrVaLgwcPQq1Ww9fXF23atMHx48exYcMG5OfnY8+ePRKWWXlCCKw89DsAYNDfmhhpTURE+ugNirlz5+LevXvo2LEjlixZAjc3N0RFRWHGjBno37+/lDVWyazvo5FXpEW3lg6oXYv3TRARVZXeoLh27Rr27dsHuVyOgoICdO/eHYcPH4aTk5OU9VVJVn4R/nPxLhRyGb4d383S5RARWTW9l8fWqlVL94AiW1tbtGjRwipCAgDmhF8FAGwe6wOlwvofskREZEl6RxTx8fEICgrSvb5z506Z1/v37zdvZVUU9usdRETfw5COTdDnRd43QUT0tPQGxYEDB6Sso9r85+JdAMDKER0tXAkR0bNBb1BY40SAxdoSxN7LQgtHO9jZGL3yl4iITGDWA/gnT55EQEAA/P39sXHjxgrbnD9/HsHBwQgMDMSbb775VNv7Q50DTaEWY15p8VTrISKiJ8z2s1ur1WLx4sXYsmULXFxcMGLECPj5+aF169a6NllZWVi0aBE2bdqEpk2bIjU19am2ueP8bQBAuyYNnmo9RET0hEkjivz8fMTHx1dqxdHR0WjevDmaNWsGGxsbBAYG4ujRo2Xa7N+/H/7+/mjatCkAwNHRsVLb+Ktz8amQyYDurRyeaj1ERPSE0aA4duwYgoODMX78eADAjRs38O677xpdsVqthqurq+61i4sL1Gp1mTYJCQnIysrCmDFjoFKpnupu78S0XMQ/0GBq39aQyTj5HxFRdTF66OnLL7/E7t27MWbMGACAl5cXkpKSjK5YCFHuvb/uwLVaLWJiYrB161bk5+dj1KhR6NSpE1q2bFmmXVhYGMLCwgAA6enpFW7vi2N/AgA6utsbrY2IiExnNCgUCgXq169f6RW7urqWmY5crVbD2dm5XJtGjRrBzs4OdnZ28PHxQWxsbLmgCA0NRWhoKABApVJVuL3kjHwAQP92fIIdEVF1MnroydPTE/v374dWq0VCQgI+/vhjdO7c2eiKO3TogISEBCQmJqKwsBARERHw8/Mr06Zfv364ePEiiouLkZeXh+joaHh4eFTpi1xNykQn94ZV+iwREelndEQxb948rF+/HjY2Npg+fTp69uyJyZMnG1+xUon58+dj/Pjx0Gq1CAkJgaenJ3bt2
gUAGD16NDw8PODr64uhQ4dCLpdjxIgRaNOm8o8rvZ2qQWZeEV5q3qjSnyUiIsNkoqKTCf/j+vXraNeunVT1GKVSqRAeHl7mvfd2XkZE9D0c/IcvvHhpLBFRORXtO01ldESxfPlyPHjwAAMHDkRgYCA8PT2rtCFzOnD1HgAwJIiIzMBoUGzfvh0PHjzAwYMHMW/ePGg0GgwaNMikw09SuJ6cBSGAQX9zNd6YiIgqzaQb7pycnPDWW29h0aJFaNu2LdatW2fuuky2+1LpJIB/f7WFZQshInpGGR1R3Lx5EwcOHMBPP/0Ee3t7DB48GLNnz5aiNpNk5xcBADq/wBPZRETmYDQo5syZg8DAQGzevBkuLjXvHoXE9Fx4NWkAGyUfUEREZA5Gg+I///mPFHVUSUmJwLWkLPRtywcUERGZi96g+Mc//oHPP/+8zFPt/ldNeMLdb3czkFNQjFdaPd1kgkREpJ/eoJg7dy4AYP369ZIVU1mRvz8AwNliiYjMSe+B/cfzMu3cuRNubm5l/tm5c6dkBRry5/1sAMALDnYWroSI6Nll9AzwmTNnyr138uRJsxRTWbdTc+FQ1wZKBU9kExGZi95DTzt37sSuXbuQmJhY5jyFRqPBSy+9JElxxtxNz0NHTgRIRGRWeoMiKCgIvXr1wmeffYbp06fr3q9bty7s7e2lqM2ggmItMvOKeNiJiMjM9AaFTCaDu7s75s+fX25ZRkaGxcPizM3S52u/6Fr5Z2UQEZHp9AbF9OnTsWHDBqhUKshksjJPrJPJZOWefy21xLRcAIBPc17xRERkTnqDYsOGDQBKn5ldE916qAEAODewtXAlRETPNqOXC126dAm5uaW/3vfu3Yvly5cjOTnZ7IUZcz4+DQDgWNfGwpUQET3bjAbFwoULUadOHcTGxmLTpk1o2rQpZs6cKUVtBqVqClDXRgGZTGbpUoiInmlGg0KpVEImk+HIkSN46623MHbsWGg0Gilq0ytdUwh1VgF6tG5s0TqIiJ4HRoOibt262LBhA/bt24c+ffpAq9WiuLhYitr0OhtfesXTgPZ8WBERkbkZDYrVq1fDxsYGy5Ytg5OTE9RqNcaNGydFbXrF3ssCAPRqwxEFEZG5GQ0KJycnBAUFITs7G8ePH4etrS2GDRsmQWn6qbMKUEshg3P92hatg4joeWA0KA4cOICRI0fi0KFDOHjwoO5vS4pKTIdTPV4WS0QkBaMPLlq/fj12794NR8fSZz6kpaXh73//OwYOHGj24vT5Q52DN7q9YLHtExE9T4yOKIQQupAAAHt7+zJ3aVuKtsTyNRARPQ+Mjih69uyJcePGITAwEEDpoahevXqZvTB9Sh6FVCunuhargYjoeWI0KGbNmoXDhw/j0qVLEEIgNDQU/v7+UtRWoeJHI4kiLUcURERS0BsUCQkJWLFiBRITE9GmTRvMmjULLi4uUtZWoccB4dqAVzwREUlB7zmKDz/8EH379sWaNWvQvn17fPzxx1LWpdfjQ0/ujepYuBIioueD3hGFRqPBa6+9BgBo1aoVhg8fLllRhhQVlwAAnDmiICKShN6gKCgowPXr13VXOOXn55d53b59e2kq/Iv8Ii0AoElDBgURkRT0BoWTkxOWL1+ue924cWPda5lMhm3btpm/ugrIH80WW7uWwiLbJyJ63ugNiu3bt0tZh8m0QqB+baMXaxERUTUxesNdTVOkFSjmpbFERJIxa1CcPHkSAQEB8Pf3x8aNG/W2i46OhpeXl0lzSMllQIM6HFEQEUnFbEGh1WqxePFibNq0CREREfjxxx8RFxdXYbtPP/0UPXv2NG29JYL3UBARScikuZ727t2LL7/8EgCQnJyM6OhooyuOjo5G8+bN0axZM9jY2CAwMBBHjx4t12779u0ICAgoM5+UIdoSgUZ8TjYRkWRMemb2b7/9hoiICAClT7xbtGiR0RWr1Wq4uj55Ap2LiwvUanW5NkeOHMGoUaMMrissLAwqlQoqlQrFW
i1sFFZ3aoWIyGoZ3eNGR0djwYIFsLUtff5Dw4YNUVRUZHTFFc0wK3t0aetjS5cuxYwZM6BQGL7UNTQ0FOHh4QgPD4dcrkAtBgURkWSMnhVWKpXQarW6nXxaWhrkcuM7aldXV6SkpOheq9VqODs7l2lz7do1TJs2DQCQnp6OyMhIKJVK9O/fX+96hRBQyGV6lxMRUfUyGhRjxozBe++9h9TUVKxevRqHDh3CBx98YHTFHTp0QEJCAhITE+Hi4oKIiAisWrWqTJtjx47p/p49ezb69OljMCQAoFBbors7m4iIzM9oUAwdOhTt27fHuXPnIITAunXr4OHhYXzFSiXmz5+P8ePHQ6vVIiQkBJ6enti1axcAYPTo0VUqWCGX8a5sIiIJyYSRx9UlJydX+H7Tpk3NUpAxbV/pj9Effo4FQZaZa4qIyBqpVCqEh4dX6bNGRxQTJ07U/V1QUIC7d++iZcuWuqugpCYEoOQ5CiIiyRgNiv3795d5HRMTg7CwMLMVZEyJEFCYcDKdiIiqR6X3uO3bt8fVq1fNUYvJMnILLbp9IqLnidERxZYtW3R/l5SU4Pr163BwcDBrUca84Ghn0e0TET1PjAaFRqPR/a1QKNC7d28EBASYtShj5DKeoyAikorBoNBqtdBoNJg1a5ZU9ZiEMUFEJB295yiKi4uhUChw/fp1KesxCUcURETS0TuiGDlyJH744Qd4eXnh3XffxcCBA2Fn9+TcwIABAyQpsCLMCSIi6Rg9R5GZmYlGjRrh/PnzZd63ZFBwREFEJB29QZGamootW7bA09MTMpmszGywf50FVmrMCSIi6egNipKSkjJXPNUkHFEQEUlHb1A4OTlhypQpUtZiMs7gQUQkHb1XPRmZK5CIiJ4TeoNi69atEpZRSTz0REQkGb1BYW9vL2EZleNS39bSJRARPTeschpWbQkPixERScUqg6JebaO3fxARUTWxyqDgo1CJiKRjlUHB+yiIiKRjlUGh4I0URESSsc6g4IiCiEgyVhkUfGQ2EZF0rHKXy3MURETSscqgqGfLy2OJiKRilUEh58lsIiLJWGdQMCeIiCRjpUHBpCAikgqDgoiIDLLSoLB0BUREzw8rDQomBRGRVKwzKDikICKSjFUGRR3OHktEJBmzBsXJkycREBAAf39/bNy4sdzyffv2ISgoCEFBQRg1ahRiY2PNWQ4REVWB2YJCq9Vi8eLF2LRpEyIiIvDjjz8iLi6uTBt3d3fs2LED+/fvx6RJkzBv3jxzlUNERFVktqCIjo5G8+bN0axZM9jY2CAwMBBHjx4t0+all15Cw4YNAQDe3t5ISUkxad08l01EJB2zTZqkVqvh6uqqe+3i4oLo6Gi97Xfv3o1evXpVuCwsLAxhYWHVXiMRERlntqAQQpR7T6ZnKHDu3Dns3r0bO3furHB5aGgoQkNDAQCe3fqBAwoiIumYLShcXV3LHEpSq9VwdnYu1y42NhYfffQRvv76azRq1Mhc5RARURWZ7RxFhw4dkJCQgMTERBQWFiIiIgJ+fn5l2iQnJ2Pq1KlYuXIlWrZsafK69Y1MiIio+pltRKFUKjF//nyMHz8eWq0WISEh8PT0xK5duwAAo0ePxtq1a5GRkYFFixYBABQKBcLDw81VEhERVYFMVHQyoQbz7NYPsWePQMG7s4mITKZSqar8Q9wq78xmRBARSccqg4KIiKRjlUHBc9lERNKxyqAgIiLpWGVQ8PJYIiLpWGVQEBGRdBgURERkEIOCiIgMYlAQEZFBDAoiIjKIQUFERAYxKIiIyCAGBRERGWR1QcFb7YiIpGV1QUFERNJiUBARkUEMCiIiMohBQUREBllfUPBsNhGRpKwvKIiISFJWGBQcUhARSckKg4KIiKTEoCAiIoMYFEREZBCDgoiIDLK6oOCpbCIiaVldUBARkbQYFEREZBCDgoiIDGJQEBGRQQwKIiIyiEFBREQGMSiIiMggswbFyZMnERAQAH9/f2zcuLHcciEEl
ixZAn9/fwQFBSEmJsac5RARURWYLSi0Wi0WL16MTZs2ISIiAj/++CPi4uLKtDl58iQSEhJw+PBhfPzxx1i4cKG5yiEioioyW1BER0ejefPmaNasGWxsbBAYGIijR4+WaXP06FEMGzYMMpkM3t7eyMrKwv379w2ul3dmExFJS2muFavVari6uupeu7i4IDo62mAbV1dXqNVqODs7l2kXFhaGsLCw0oJzUqBSqcxVtlVJT09Ho0aNLF1GjcC+eIJ98QT74olbt25V+bNmCwohRLn3ZDJZpdsAQGhoKEJDQwEAKpUK4eHh1VSldWNfPMG+eIJ98QT74omn+YFttkNPrq6uSElJ0b2uaKTw1zYpKSnl2hARkWWZLSg6dOiAhIQEJCYmorCwEBEREfDz8yvTxs/PD3v27IEQAr/99hvq16/PoCAiqmHMduhJqVRi/vz5GD9+PLRaLUJCQuDp6Yldu3YBAEaPHo3evXsjMjIS/v7+qFOnDpYtW2Z0vY8PQRH74n+xL55gXzzBvnjiafpCJio6UUBERPQI78wmIiKDGBRERGRQjQ0KTv/xhLG+2LdvH4KCghAUFIRRo0YhNjbWAlVKw1hfPBYdHQ0vLy8cOnRIwuqkZUpfnD9/HsHBwQgMDMSbb74pcYXSMdYX2dnZePfddzF06FAEBgbi+++/t0CV5jdnzhy88sorGDJkSIXLq7zfFDVQcXGx6Nevn7hz544oKCgQQUFB4s8//yzT5sSJE2LcuHGipKREREVFiREjRlioWvMypS8uXbokMjIyhBCl/fI898XjdmPGjBHjx48XBw8etECl5mdKX2RmZopBgwaJpKQkIYQQDx8+tESpZmdKX3z11Vdi5cqVQgghUlNTRZcuXURBQYElyjWrCxcuiGvXronAwMAKl1d1v1kjRxTmmv7DGpnSFy+99BIaNmwIAPD29i5zb8qzxJS+AIDt27cjICAAjo6OFqhSGqb0xf79++Hv74+mTZsCwDPbH6b0hUwmg0ajgRACGo0GDRs2hFJptos+LaZLly66fUFFqrrfrJFBUdH0H2q12mCbx9N/PGtM6Yv/tXv3bvTq1UuK0iRn6r8XR44cwahRo6QuT1Km9EVCQgKysrIwZswYqFQq7NmzR+IqpWFKX7zxxhu4efMmfH19MXToUMydOxdyeY3c/ZlVVfebNTJSRTVO/2HtKvM9z507h927d2Pnzp3mLssiTOmLpUuXYsaMGVAoFFKVZRGm9IVWq0VMTAy2bt2K/Px8jBo1Cp06dULLli2lKlMSpvTF6dOn4eXlhW3btuHOnTt4++234ePjg3r16klVZo1Q1f1mjQwKTv/xhCl9AQCxsbH46KOP8PXXXz+zk6CZ0hfXrl3DtGnTAJROCBcZGQmlUon+/ftLWqu5mfrfSKNGjWBnZwc7Ozv4+PggNjb2mQsKU/oiPDwcEyZMgEwmQ/PmzeHu7o74+Hh07NhR6nItqqr7zRo59uL0H0+Y0hfJycmYOnUqVq5c+cztBP6XKX1x7Ngx3T8BAQFYsGDBMxcSgGl90a9fP1y8eBHFxcXIy8tDdHQ0PDw8LFSx+ZjSF02aNMHZs2cBAA8fPsStW7fg7u5uiXItqqr7zRo5ojDX9B/WyJS+WLt2LTIyMrBo0SIAgEKheCZnzDSlL54XpvSFh4eH7pi8XC7HiBEj0KZNGwtXXv1M6YvJkydjzpw5CAoKghACM2bMgIODg4Urr37Tpk3DhQsXkJ6ejl69emHq1KkoLi4G8HT7TU7hQUREBtXIQ09ERFRzMCiIiMggBgURERnEoCAiIoMYFEREZBCDgmokLy8vBAcH6/65e/eu3radO3d+6u3Nnj0bfn5+CA4OxvDhwxEVFVXpdcydOxdxcXEAgPXr15dZVl1TijzulyFDhuDdd99FVlaWwfY3btxAZGRktWybnl+8PJZqpM6dO5u8s65MW31mz56NPn36YODAgTh9+jRWrFiB/fv3V3l91VGTsfXOmjULL
Vq0wKRJk/S2Dw8Px7Vr1zB//vxqr4WeHxxRkFXQaDQYO3Yshg8fjqCgIBw5cqRcm/v37+ONN97Q/eK+ePEigNJ5fkJDQzF8+HC8//770Gg0BrfVpUsX3LlzBwCwZcsWDBkyBEOGDMHWrVsBALm5uZgwYQKGDh2KIUOG4MCBAwCAMWPG4OrVq/j000+Rn5+P4OBgTJ8+HcCTUc8HH3xQ5hf+7Nmz8dNPP0Gr1WLFihUICQlBUFAQvvvuO6N94u3trZvQLTo6GqNGjcKwYcMwatQoxMfHo7CwEGvWrMGBAwcQHByMAwcOIDc3F3PmzEFISAiGDRtWYT8SlfNUk58TmUnbtm3F0KFDxdChQ8XkyZNFUVGRyM7OFkKUPk+gf//+oqSkRAghhLe3txBCiM2bN4t169YJIUqfUZCdnS1SU1PF66+/LjQajRBCiA0bNogvvvii3PZmzZqle3bFgQMHxIgRI8TVq1fFkCFDhEajETk5OWLw4MEiJiZGHDp0SMydO1f32aysLCGEEG+++aaIjo4uU9Njj18fPnxYzJw5UwghREFBgejVq5fIy8sT3333nVi7dq3u/eHDh4s7d+6Uq/PxeoqLi8XUqVNFZGSkEEKI7OxsUVRUJIQQ4pdffhFTpkwRQgjx/fffi0WLFuk+v2rVKrFnzx4hROnzKgYMGKDrGyJ9auQUHkS1a9fG3r17da+Liorw2Wef4ddff4VcLodarcbDhw/h5OSka9OhQwd8+OGHKC4uRv/+/eHl5YXjx48jLi5ON71HUVERvL29K9zmypUr8dVXX8HBwQFLly7F2bNn0b9/f9jZ2QEA/P39cfHiRfj6+mLFihX45JNP0LdvX/j4+Jj8vXr16oUlS5agsLAQJ0+ehI+PD2rXro1ffvkFv//+O3766ScApU9ku337Npo1a1bm849HKklJSWjfvj169Oihaz9r1izcvn0bMpkMRUVFFW7/9OnTOHbsGL755hsAQEFBAe7du/dMzgFF1YdBQVZh//79SEtLQ3h4OGrVqgU/Pz8UFBSUadOlSxfs2LEDkZGRmDlzJsaNG4cGDRqgR48e+Oyzz4xuY+bMmRg4cKDu9ZkzZyps17JlS4SHhyMyMhKrVq1Cjx49MGXKFJO+h62tLbp27YpTp07h4MGDCAwMBFA6/fNHH30EX19fg59/HKDZ2dmYOHEivv32W7z11lv4/PPP0a1bN6xduxZ3797FW2+9pXcda9asQatWrUyqlwjgOQqyEtnZ2XB0dEStWrVw7tw5JCUllWuTlJQER0dHvPbaawgJCUFMTAy8vb1x+fJl3L59GwCQl5eHW7dumbTNLl264MiRI8jLy0Nubi6OHDkCHx8fqNVq1KlTB8HBwRg3bhyuX79e7rNKpVLvr/rAwECEh4fj4sWL6NmzJwCgZ8+e2LVrl+4zt27dQm5urt7a6tevj48++gjffPMNioqKkJ2dDRcXFwDADz/8oGtXt27dMudkevbsiR07duieS1BR7UR/xREFWYWgoCBMmjQJKpUKXl5eFf4ivnDhAjZv3gylUgk7OzusWLECDg4OWL58OaZNm4bCwkIApSeUTZmOvX379lCpVBg5ciQAYMSIEWjXrh1OnTqFlStXQi6XQ6lUYuHCheU++9prr2Ho0KFo164dVq1aVWZZjx49MGvWLPj5+cHGxgYAMHLkSCQlJUGlUkEIgUaNGmHdunUG62vXrh3atm2LiIgIjB8/HrNnz8aWLVvQvXt3XZtu3bph48aNCA4OxsSJEzF58mQsW7YMQ4cOhRACbm5u2LBhg9G+oOcbL48lIiKDeOiJiIgMYlAQEZFBDAoiIjKIQUFERAYxKIiIyCAGBRERGcSgICIig/4/j7xZY6u4tMoAAAAASUVORK5CYII=", "text/plain": [ - "Filter (num_proc=16): 0%| | 0/33558 [00:00" ] }, "metadata": {}, "output_type": 
"display_data" }, { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "dc429098c2a14f00be1e5921cde897dc", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Filter (num_proc=16): 0%| | 0/33558 [00:00:45: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" ] }, @@ -497,55 +917,47 @@ "

\n", " \n", " \n", - " [834/834 02:35, Epoch 1/1]\n", + " [834/834 01:33, Epoch 1/1]\n", "
\n", " \n", " \n", - " \n", + " \n", " \n", " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", "
StepTraining Loss
830.708600
1660.656300
2490.5536001000.645900
3320.4306002000.582800
4150.3000003000.461700
4980.2029004000.350200
5810.1447005000.262800
6640.1099006000.180400
7470.0960007000.140900
8300.0867008000.109600

" @@ -558,77 +970,84 @@ "output_type": "display_data" }, { - "name": "stdout", + "name": "stderr", "output_type": "stream", "text": [ - "****** Validation split: 4/5 ******\n", - "\n" + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-8e85e7414566994a.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-e2704cdfc217c3e3.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-e213b038886d7cd4.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-d6c9eba9fe9ffafc.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-442181417de57bb6.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-0d8563be811b9c30.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-85690e0bf5863858.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-3bdda0a32e054f19.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-3abe0ffb170c29f0.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-b132478871346000.arrow\n", + "Loading cached processed dataset at 
/n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-09db8f6a69301008.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-34ae599619e2ced6.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-c74b97625f913f63.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-228b6002a6690208.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-d644cc9c55478a2a.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-d3d097800ebd687c.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-2e536900ba2b88cc.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-0434f2adbb78af27.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-926036de71570e84.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-d7f012de8332824e.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-57a002ae2aa9ba42.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-0476d5fed302e1c5.arrow\n", + "Loading cached processed 
dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-69341790285e8ce2.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-ee190fa69ba78df3.arrow\n", + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-4b3dc879e23e8e63.arrow\n" ] }, { "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "1a9cebe980534274907ae3858a706c37", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Filter (num_proc=16): 0%| | 0/33558 [00:00" ] }, "metadata": {}, "output_type": "display_data" }, { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "7e3be2a6e2084240b6f657964466ccf2", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Map (num_proc=16): 0%| | 0/10000 [00:00:45: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" ] }, @@ -639,55 +1058,47 @@ "

\n", " \n", " \n", - " [834/834 02:35, Epoch 1/1]\n", + " [834/834 01:32, Epoch 1/1]\n", "
\n", " \n", " \n", - " \n", + " \n", " \n", " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", " \n", " \n", "
StepTraining Loss
830.6975001000.660300
1660.6320002000.588000
2490.5246003000.465400
3320.3943004000.331400
4150.2647005000.241100
4980.1801006000.168800
5810.1283007000.136600
6640.094200
7470.082200
8300.0785008000.113900

" @@ -700,530 +1111,1300 @@ "output_type": "display_data" }, { - "name": "stdout", + "name": "stderr", "output_type": "stream", "text": [ - "****** Validation split: 5/5 ******\n", - "\n" + "Loading cached processed dataset at /n/holyscratch01/xiaoleliu_lab/Users/ctheodoris/datasets/geneformer_corpus_2048_sorted.dataset/cache-c438e6f7f8463bbc.arrow\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "455067153dc145cba4e3cfdc63f129cc", + "model_id": "6f8a9dd0a5754dec845c0022470a8c96", "version_major": 2, "version_minor": 0 }, "text/plain": [ - "Filter (num_proc=16): 0%| | 0/33558 [00:00\n", - " \n", - " \n", - " [834/834 02:35, Epoch 1/1]\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
StepTraining Loss
830.711400
1660.644000
2490.535900
3320.395400
4150.275400
4980.193600
5810.129300
6640.093300
7470.070000
8300.067100

" - ], + "application/vnd.jupyter.widget-view+json": { + "model_id": "17799d65feac4638a0071df44f6432db", + "version_major": 2, + "version_minor": 0 + }, "text/plain": [ - "" + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" ] }, "metadata": {}, "output_type": "display_data" - } - ], - "source": [ - "# 6 layer 30M Geneformer model: https://huggingface.co/ctheodoris/Geneformer/blob/main/gf-6L-30M-i2048/model.safetensors\n", - "all_metrics = cc.validate(model_directory=\"/path/to/Geneformer\",\n", - " prepared_input_data_file=f\"{output_dir}/{output_prefix}_labeled.dataset\",\n", - " id_class_dict_file=f\"{output_dir}/{output_prefix}_id_class_dict.pkl\",\n", - " output_directory=output_dir,\n", - " output_prefix=output_prefix)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "11a1329b-4968-45f3-ac7a-2438b574404e", - "metadata": {}, - "outputs": [ + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, { "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "e103daf395794272989c209b32c12afc", + "version_major": 2, + "version_minor": 0 + }, "text/plain": [ - "

" + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" ] }, "metadata": {}, "output_type": "display_data" }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAqgAAAHcCAYAAAAa41gWAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAACAAklEQVR4nO3dZ1hU19oG4GdABgQZFaQjiggoTUSKWMASRQWixt4LsYAtGmI0tqhEjBoxYCxRVERPJFhIrCdGEzFG1BgLdiUGGxakDIL0+X5w2J+TAaTKEJ77uuYcZ83aa6897sjLu8oWyWQyGYiIiIiIlIRKbXeAiIiIiOhNDFCJiIiISKkwQCUiIiIipcIAlYiIiIiUCgNUIiIiIlIqDFCJiIiISKkwQCUiIiIipcIAlYiIiIiUCgNUIiIiIlIqDWq7A0REpOjWrVvYuXMnzp8/j+fPnwMADA0N0bFjRwwZMgT29va13MMikZGR2LlzJ5KSkpCXl4cLFy5AIpHUdreIqI4T8VGnRETKZc+ePVi+fDnMzc0xdOhQWFpaAgD++usvHDp0CH/++SeOHz8OMzOzWu3nzZs3MWDAAAwZMgQDBgxAgwYNYG9vD1VV1VrtFxHVfcygEhEpkYsXL2Lp0qXw9PREaGgoxGKx8Jm7uztGjRqFo0ePQl1dvRZ7WeTu3bsAgKFDh8LBwaFa2nz9+jUaNmxYLW3V5T4Q1Xecg0pEpEQ2b94MVVVVLFu2TC44fVPfvn1hYGAgvI+Pj8fUqVPh6uoKe3t7DBgwAEeOHJE7Zv/+/bC2tkZcXByWLFkCNzc3uLm5Yfr06Xj27JnCOY4cOYJhw4bB0dER7du3h5+fH27cuCF8PmbMGHzyyScAgCFDhsDa2hrz5s0TPt+7dy/ef/992Nvbw9XVFdOmTUNCQoLcOebNm4f27dvj9u3bmDhxItq3b4/x48cDAKytrbFs2TLs27cPXl5ecHBwwAcffIDLly9DJpNh69at6NGjB9q3b4+xY8ciMTFR4Rp+//13jBs3Dk5OTmjXrh2GDx+Os2fPytUJCwuDtbU1rl+/jpkzZ8LFxQW9evUq8XsnoneHQ/xEREqioKAATk5OaNu2Lfbs2VOuY+Li4vDhhx+iXbt2GDVqFLS1tXHkyBHs378fwcHB+OCDDwAUBajz589H8+bN0a1bN3Tr1g1JSUlYvXo12rRpg507dwptbtq0CevWrcMHH3yA3r17Iy8vD+Hh4bh9+zaio6PRunVr3Lt3D4cOHcLGjRsRHByMVq1aQUdHB2ZmZti8eTPWrl0LHx8fvP/++0hNTcX69euRlpaGvXv3omXLlgCKAtRDhw5BX18fw4cPh729PQoKCtClSxdYW1vDxMQE+vr68PPzg0gkwurVq5GWloYBAwbg4cOHGDp0KDIyMrBy5Uro6urihx9+gEgkAgD88MMP+PTTT9GzZ09h+kFUVBRiY2MRHh4Od3d3AEUB6vr162FiYoJ+/fqhU6dOyMrKwnvvvVeNf7NEVGEyIiJSCi9evJBZWVnJZs+erfBZfn6+LC8vT3gVFhbKZDKZrE+fPrIBAwbI8vLy5OpPmTJF1rlzZ1lBQYFMJpPJ9u3bJ7OyspJ9/vnncvW2bNkis7Kykj1//lwmk8lkT548kdnY2MiWL18uV+/Vq1eyzp07y2bNmiWUFbd59epVoSw9PV3m4OAgmzRpktzxT548kdnZ2cnmzJkjlH366acyKysr2d69exWu18rKSta5c2dZZmamUHb8+HGZ
lZWVrH///sL1y2Qy2Y4dO2RWVlayW7duyWQymSwrK0vm6uoqmzJlilybBQUFsvfff182ePBgoSw0NFRmZWUl+/rrrxX6QES1h0P8RER1wAcffABbW1vhtW3bNiQmJuKvv/6Cr68vACA/P194eXh44MWLF7h//75cOz169JB7b21tDQB48uQJAOC3335Dfn4++vfvL9eeuro6XFxccP78+TL7eenSJWRnZ2PgwIFy5UZGRujYsSPi4uIUjvHy8iqxLTc3N2hqagrvLSwsAAAeHh5CpvTN8uJruHTpEtLS0jBw4EC5aygsLETXrl0RHx+PrKwsuXP17t27zOsioneLi6SIiJRE06ZNoaGhIQRab/rqq6/w+vVrvHjxAv7+/gCA5ORkAMCXX36JL7/8ssQ2U1NT5d43adJE7n3xPNfs7Gy5NgcPHlxieyoqZec10tLSAAB6enoKn+nr6+P333+XK2vYsCEaNWpUYluNGzeWe6+mplZmeU5ODoD/v4aZM2eW2s/09HS54FdfX7/UukT07jFAJSJSEqqqqujYsSPOnDmD58+fywVNrVu3BgA8evRIKGvatCkAYMqUKaUu7DE3N69QH4rbDA0NhbGxcYWOBf4/AH7x4oXCZ8+fPxfaL/ZmJrS6FJ9j0aJFaNeuXYl1dHV1q/28RFR9GKASESmRyZMnIzY2FkuWLEFoaKiQHSxJq1at0LJlS9y6dQtz5syplvN36dIFDRo0wIMHD0odei9L+/btoaGhgR9//BF9+/YVyp8+fYq4uLhKtVlRTk5OkEgkuHfvHkaPHl3j5yOi6scAlYhIiXTo0AGLFy9GUFAQPvjgAwwdOhStW7eGiooKXrx4gZ9++gkAhGHxpUuXYtKkSfDz88PAgQNhYGCA9PR0JCQk4Pr16wgNDa3Q+U1NTTFz5kysW7cODx8+hIeHByQSCZKTkxEfH4+GDRuWOXQukUgQEBCAtWvXYu7cufD29kZaWhq++eYbqKurY/r06ZX/cspJS0sLCxcuxLx585Ceng4vLy/o6uoiJSUFt27dQkpKCpYuXVrj/SCiymOASkSkZEaMGIH27dsjIiICO3bswPPnzyESiWBoaIj27dtjx44dwjZJHTt2RHR0NDZt2oQVK1ZAKpWiSZMmsLCwkMtgVsSUKVNgYWGBnTt34vDhw8jNzYWenh7s7OwwYsSIch2vo6ODyMhIHDlyBBoaGnB1dcWcOXOELaZqWv/+/WFsbIytW7diyZIlyMzMhI6ODtq2bauwgIuIlA/3QSUiIiIipcJtpoiIiIhIqTBAJSIiIiKlwgCViIiIiJQKA1QiIiIiUioMUImIiIhIqTBAJSIiIiKlwn1Qid6RS5cuQSaTlflkICIiUl55eXkQiURo3759jZ3j9u3byM3NrZa2xGIxrK2tq6Wtd40BKtE7IpPJkJdfgMcvX9d2V4hqjKm+dm13gajGvIut43Nzc5H1OhvPUzKr1I6+jlY19ah2MEAlekfU1NTw+OVrDP36cm13hajG3N75YW13gajGvHx4Gw1URDV+nucpmRg6f2+V2vg+eDBammhUU4/ePQaoRERERMpGVL+XCTFAJSIiIlI2oprP1Cqz+h2eExEREZHSYQaViIiISNlwiJ+IiIiIlEo9H+JngEpERESkTESiqmdQ63iAW7/zx0RERESkdJhBJSIiIlI2dTwDWlUMUImIiIiUTT1fJFW/r56IiIiIlA4zqERERETKhkP8RERERKQ8qmEVP+p2gMshfiIiIiJSKsygEhERESkbDvETERERkVLhKn4iIiIiIuXBDCoRERGRsuEQPxEREREpDRGqPsRfx+NbBqhERERESoXbTHEOKhEREREpFWZQiYiIiJSNSt3OgFYVA1QiIiIiZcNtpoiIiIiIlAczqERERETKhttMEREREZFS4RA/EREREZHyYAaViIiISNlwiJ+IiIiIlIaoGjbqr+MBLof4iYiIiEipMINKREREpGzqeAa0qhigEhER
ESmber6KnwEqERERkbKp5xnU+h2eExEREZHSYQaViIiISKlUwyp+1O0MLANUIiIiImXDIX4iIiIiov93//59+Pn5wdHREe7u7ggKCkJ2dvZbj8vKysKaNWvw3nvvoV27dujduzfCwsKQm5tbofMzg0pERESkbGpxFb9UKsW4ceNgbGyM0NBQpKSkIDg4GGlpaVizZk2Zx37++ef4+eefMXv2bFhaWuLq1asIDQ1Feno6Fi5cWO4+MEAlIiIiUiYiVMOTpCp/6J49eyCVShETEwMdHR0AgKqqKgIDA+Hv7w8LC4sSj8vPz8exY8fw4YcfYsyYMQCAjh074smTJzhy5EiFAlQO8RMRERGRIDY2Fu7u7kJwCgBeXl4Qi8U4depUqcfJZDIUFBRAW1tbrlwikUAmk1WoDwxQiYiIiJSNSFS1VxUkJCQoZEnFYjHMzMyQkJBQ6nFqamr44IMPEBkZiStXriAzMxNxcXH4/vvvMWrUqAr1gUP8REREREqleraZSkpKwkcffVRqjRMnTpRYLpVKIZFIFMolEgnS09PLPOvnn3+OJUuWYOjQoULZmDFjMH369PJ1+38YoBIRERHRW8lkMojekp1ds2YNfv31Vyxfvhzm5ua4fv06QkNDIZFIMHPmzHKfiwEqERERkbKphn1QjYyMSs2SlkUikUAqlSqUZ2RklLpACgDu3LmDbdu2YcOGDejZsycAwMXFBSKRCKtWrcKoUaOgq6tbrj5wDioRERGRshGpVO1VBRYWFgpzTXNzc/HgwYMyA9R79+4BANq2bStX3rZtW+Tn5+Px48fl7gMDVCIiIiJlU4uLpDw8PBAXF4fU1FSh7Pjx48jNzYWnp2epx5mYmAAArl+/Lld+7do1AICpqWm5+8AhfiIiIiISDB8+HLt27UJAQAACAgLw8uVLrFy5Er6+vnIZ1M8++wwxMTG4ceMGAMDOzg4ODg5YsmQJkpOTYW5ujvj4eGzYsAH9+vWT27bqbRigEhERESmZty1GqkkSiQQREREICgrCjBkzoKGhAR8fHwQGBsrVKywsREFBgfBeVVUVmzZtwtdff40tW7YgOTkZRkZGGD16NKZOnVqhPjBAJSIiIlIytRmgAoC5uTnCw8PLrLNy5UqsXLlSrkxXVxfLli2r8vk5B5WIiIiIlAozqERERETKRPS/V1XbqMMYoBIREREpFVE1DPHX7QiVQ/xEREREpFSYQSUiIiJSMrW9SKq2MUAlIiIiUjL1PUDlED8RERERKRVmUImIiIiUTH3PoDJAJSIiIlI29Ts+ZYBKREREpGzqewaVc1CJiIiISKkwg0pERESkRESiqmdQ63oClgEqERERkZLhED8RERERkRJhBpWIiIhIydT3DCoDVCIiIiJlU7/jUw7xExEREZFyYQaViIiISKmIqmGIv26nYBmgEhERESmZ+j4HlUP8RERERKRUmEElIiIiUjL1PYPKAJWIiIhI2dTv+JQBKhEREZFSqYZHndb1AJdzUImIiIhIqTCDSkRERKRERKh6BrWOJ1AZoBIREREpm/q+SIpD/ERERESkVJhBJSIiIlIy9T2DygCViIiISNnU7/i07gaoYWFhWL9+PYCi3zK0tLRgbGwMFxcXjBo1ChYWFrXcw/rh0aNH6NmzJ77++mv06dMHALBjxw6Ym5vD09NTru6YMWOgqamJzZs312if3rw3SmNiYoKTJ09i3rx5OHDggMLnXbp0QXh4eE11kYiIiMpQZwNUANDQ0EBERAQAIDMzE3fu3EFUVBS+//57fPHFF+jfv38t9/DfT19fH1FRUWjZsqVQtnPnTnTr1k0hQF2yZAlUVGp+2vOQIUPQtWtX4X10dDQOHTok3CsAIBaLhT83b94ca9askWtDW1u7xvtJRERUMlE1DPHX7RRsnQ5QVVRU4OjoKLzv3LkzRo4cicmTJ2PBggVwcnJC8+bNa6+D9YBYLJb7OyhL69ata7Yz/2NoaAhDQ0Ph/enTpxXulTdp
aGiU+xqIiIjehfo+B/Vft4pfXV0dixYtQl5eHqKjo4XywsJCbNq0CT169ICdnR169+6NHTt2yB379OlTzJo1C506dYK9vT169OiBFStWCJ8nJCRg9uzZ8PT0RLt27dCvXz9s27YNhYWFCu1MmTIFDg4O6Nq1K7Zu3Yply5ahR48eCvUCAwPh5uYGBwcHjBo1CteuXXvrNUqlUixcuBBdu3aFvb09PD09MXv27Aq33aNHDyxbtgy7du1C9+7d0aFDBwQEBCAlJUWok5eXhy+//BLdu3eHnZ0dunTpgqlTpyIjIwNA0RC/tbU1jh07JrT5+PFj7N69G9bW1rC2tsb+/fsBFA3xT5kyBQBw7tw5WFtbK/SpoKAAnTp1wqpVq+S+d39/f3To0AGOjo6YPHkyHjx48Nbvqbrs3bsX3t7ecHBwgJubG0aMGIGrV6++s/MTERHVN3U6g1qa1q1bw8DAAJcuXRLKVq1ahYiICEyZMgXOzs44c+YMgoODkZmZiWnTpgEA5s6di+fPn2PhwoXQ1dVFUlKSXAD1/PlzmJubw9fXF1paWrh58ybCwsKQlZWF6dOnAwBkMhkCAgKQnJyMZcuWQVtbG1u3bsWTJ0+gqqoqtJWeno6RI0dCU1MTixYtgra2NiIjIzFu3Dj89NNP0NXVLfX6goODcfr0aXz88ccwMTHBixcvEBsbW6m2T548icTERCxevBipqalYsWIFli9fjpCQEADA5s2bsWfPHgQGBsLS0hKpqak4c+YMcnNzS+zb+vXrMXnyZDg5OWHixIkAADMzM4V6Li4uMDAwwOHDh2FnZyeUx8XF4eXLl/Dx8QEAPHz4EMOHD4elpSVWrlwJkUiETZs2Yfz48Th27JjcUH1V5Ofny71v0KDoP40LFy5gwYIFmDhxIjw9PZGdnY2rV68KAToREVG146NO/50BKgAYGRkhOTkZAJCSkoJdu3ZhwoQJ+OijjwAULYLJzMzE1q1bMX78eGhpaSE+Ph5z5sxBv379hHYGDBgg/Nnd3R3u7u4AigLRDh06IDs7G7t27RIC1NjYWFy/fh27d++Gs7MzAMDNzQ0eHh5o0qSJ0FZERASkUimio6OFgNHd3R29evVCeHg45s6dW+q1xcfHw8fHBwMHDhTKvL29K9W2TCbDxo0bhUAvMTER4eHhKCwshIqKCuLj49GlSxeMGjVKOMbLy6vUvtnY2EAsFqNZs2ZlDpurqKigX79+OHLkCObOnSv8h3jo0CGYm5vDxsYGQFHAK5FIsH37dqirqwMAnJyc0LNnT0RHR8v1q7Lu3r0LW1tbubLiv7+rV6+iSZMm+PTTT4XPunXrVuVzEhERlamWA8z79+8jKCgIFy9eRMOGDeHt7Y3AwEBoaGiUekzxwumSqKmplWuUuNi/NkCVyWRC0HP16lXk5eXJBZ5AUVAXFRWFmzdvwtnZGTY2Nti2bRtUVVXRuXNntGjRQq5+Tk4ONm/ejIMHDyIpKQl5eXnCZ5mZmUKQK5FIhOAUABo1agQ3Nzfcvn1bKDtz5gzc3NzQuHFjIXunoqICZ2dnxMfHAyialvDm9AEVFRWoqKjAxsYGBw4cgJ6eHrp27QorKyu5fpan7WIuLi5yWcjWrVsjLy8PL1++hJ6eHmxsbBAeHo6wsDB4enrCzs6u2hY6eXt7Y/v27bh48SKcnZ2Rm5uLn3/+GePGjZO7ln79+kFVVVW4FolEUuL0gMoyMzPD2rVr5cpatWoFoCjgTktLw7x58+Dr6wsnJyc0bNiwWs5Lb6eloYYFo9wwoHNrNG2kjruP07Bu30Xs/+3eW4/tYmeCOYM7wK6lLhqqqyHxWTp2Hr+JrUfjUVgoE+p5ObfAgM6t4dBKD5YmTaDWQBVNB3xTk5dFJMh8nYN1247i6KkrSJdmoZWZPiYP7wHvHu3LPO7pizSEf/8rbt57jFsJT5CRmY3gT4bhgz6uCnVz8/KxcdfP+OHni3ienA49HQl8erbHtDG9oaGuVlOX
RlVUm3NQpVIpxo0bB2NjY4SGhiIlJQXBwcFIS0tTWFT8puKF02+SyWSYNGkS3NzcKtSHf22A+vTpU2FleXp6OgBAT09Prk6zZs0AAGlpaQCAkJAQhISEYN26dVi6dCnMzc0xZ84c9O7dGwCwevVqREdHY9q0abCzs4O2tjZOnDiBjRs3IicnB1paWnj+/Dl0dHQU+vPPIfvU1FRcvnxZIXMH/P+Q+DfffCO3XdL06dMxY8YMLFq0CI0bN8b27duxatUqGBkZYfLkyRg5cmS52y4mkUjk3qupFf1jlZOTAwDw9/eHiooKDhw4gPXr10NHRwejRo3CtGnTqvwfj729PVq2bIlDhw7B2dkZsbGxkEqlwvB+8bVERETIrcAvVtZvcRWhrq4Oe3v7Ej9zd3fHqlWrsHPnTvj5+UFdXR1eXl747LPP5DLiVDN2zusDp9b6WLozDveepGGwhyXCA72goiLC3ti7pR7n6WCKfUt88fuNJ5i14VdkZeehr6s5vpzUFeaGEswP/02o692xFZytDHH1/gvk5BWgfWv9d3FpRACAGUt2IP72Q3z8oTdamurh0Mk/MeeLXSiUyeDb06nU4xIfJ+PgiT/R1sIYnm5tcejkpVLrfvzFLpw6dxPTxvSGvXVzXLqRiI27j+Pu30+xKcivJi6L6rg9e/ZAKpUiJiZGiGlUVVURGBgIf3//UrfyLGnh9Llz55CRkSH3s708/pUB6t27d/Hs2TNhCLw4kEhOToaBgYFQr3gKQPHn+vr6CA4ORmFhIa5du4aNGzdi9uzZOHbsGJo3b45jx45h2LBhmDx5stDGqVOn5M6tr68vt8io2MuXL+XeN27cGF27dsWsWbMU6hZnNIcOHSo3nKyvX/SDU1tbGwsWLMCCBQtw+/Zt7Ny5E0uXLoWlpSVcXFzK1XZ5icVizJgxAzNmzEBiYiL27duHsLAwmJqayk1/qCxvb2989913WLhwIY4cOQJbW1u5LasaN24MT09PIfh+k5aWVpXPXx79+/dH//79kZKSghMnTiA4OBgNGjSQW0BH1a9Xhxbo4WiGD7/6CftOFwWjv117jOb62lg6rhP2/3ZPLhP6ppE92iCvoBDDgw4jK6co837q6iO0NmmCkT3ayAWos775BbL/NbNqUlcGqPTOnDp3E2cu3sFXC0bBp0dRMNqxfWs8eZaKVZsPol83R6iqljxi5eLQCnH7lwEA4m8/LDVAvXwjET+djse8qe9jwpCirf86dbBCA1UVrA0/gjN/3EZnZ+sauDqqqtrMoMbGxsLd3V0u4VacnDl16lSF9po/dOgQGjVqpLBQ/G3+dav4c3JysHz5cojFYgwZMgRAUaZOTU0NR48elat75MgRaGpqCvMdi6moqMDBwQEfffQR8vPzkZiYKLRdnGEEilacHz58WO5Ye3t7SKVSXLhwQSh79eoVzp07J1evU6dOSEhIgIWFBezt7eVe1tZF/1gYGBjIlb8ZXBeztrbG/PnzAQB//fVXuduujBYtWmDOnDlo0qSJcK6SqKmpCRnYt/H29kZKSgp++eUX/PLLL/D19ZX73N3dHXfv3oWNjY3CtRQPw78rOjo6GDJkCDp37lzm9VP18HYzR8brXMSckR/O/8+JWzDWbQRnS8X/HorlFRQiN78Qr3PlF7+lZ+YgO69ArkxWcoxLVOOO/xYPzYbq6OPZTq78gz4ueP5Siiu3Eks9trxTrf68dh8A4OHWRq68W8ein3v/PR2vcAzVPhGKAtQqvapw/uIY4k1isRhmZmZISEgodzt5eXn46aef0KtXL2EdSXnV6QxqYWEhLl++DADIysoSNup/+PAhVq5cCVNTUwBFgcWYMWOwbds2iMViODk54ezZs4iKisKMGTOgqamJjIwM+Pn5oX///jA3N0deXh4iIyMhkUiEALZTp06Ijo5G69atoaOjg927dyusZvfw8ICtrS0+/vhjzJkzBxKJBFu2bIG2trbcb0Pjx4/HwYMHMXr0aIwd
OxbGxsZISUnBlStXYGBggPHjx5d63cOHD0evXr1gaWkJVVVVxMTEQE1NTZj3WpW2/ykgIAC2trawsbFBw4YN8csvvyAtLQ0dO3Ys9ZhWrVohLi4OZ86cgUQigampKZo2bVpiXQsLC9jY2GDZsmV4/fq1wjzhmTNnYvDgwfDz88PQoUPRrFkzJCcn4/z583B2dq7wkEFFhYaGIi0tDa6urtDV1cWdO3dw+vTpCn2HVDltzXRx52EqCv6RJb3+d9FoRNsWOjh/+2mJx24/dh2Dulriyw+7Yu3ei8jKzUdfl5bwcWuFZbviarzvROVx9/5TWJjpo8EbO7wAgHUrY+FzJ1vzKp0jL7/oFzKxmvyP++L3d/56UqX2SbklJSUJi8NLcuLEiRLLpVKpwhRAoGhaYPG0yfKIjY1FWlpapX5W1+kANTs7G8OGDYNIJIKmpiZMTEzg7u6O9evXK0T+n3zyCSQSCaKjo/Htt9/CyMgI8+bNEwINdXV1WFlZITIyEklJSdDQ0ICdnR3Cw8OFFPeiRYuwZMkSLF++HA0bNsTAgQPRq1cvLFy4UDiPSCTChg0bsHjxYixevBgSiQRjx47F3bt3cffu/8+Za9q0KaKiorBu3TqsWbMGaWlp0NXVRbt27dCrV68yr9vJyQkxMTF49OgRVFRUYGVlhU2bNgnXXJW2SzrX0aNHsX37dhQUFMDc3BxfffUVOnXqVOoxc+bMweeff44ZM2YgMzMTwcHB+OCDD0qt7+3tjdWrV8PV1VUhS9yiRQtER0cL84KzsrKgp6cHFxeXKmWDy8ve3h4RERE4evQoXr16BUNDQ/j5+cHf37/Gz13f6Whr4O9niv8Qpr7KFj4vzcW7z9B/0Q/YPtcLk7wdAAD5BYVYFhmHb364XCP9JaqoNGkmTI0VtxRsrK35v8+zqnwOixZF/6b+ee1vNDf6/3NdvFY0CpRaDeegmqGMG/W/uQC9PA4ePIhmzZoJOyBVRJ0NUIvnRZaXiooK/P39Sw0sxGIxgoKCymyjWbNm+OYbxdW9xVMJihkaGuLbb78V3ufm5qJv375wdZVfXamnp4cvvviivJcgmDt3bpnbUJW37ZMnTyqU9enTR263gQ8//BAffvhhqW2YmprK1QcAS0tL7N69W6FuZGRkiW287RwtW7bEunXrSv38bcq6V1auXFnmsd27d0f37t0rfW6qojKG38samm9noYfI+X1x8c4zzNn4KzKz8+HhYIIFo9ygLlbFmu//qP6+ElVCWQOx1RGfeLi2QQuTZliz5RCaNW0E+zZmuHwjESHhR6GqogIVFeULguh/quGvxsjIqNQsaVkkEgmkUqlCeUZGRrnnn2ZmZuLXX3/F4MGD5faBL686G6Aqs6ioKBQWFsLc3BxSqRTfffcdkpKSSlzoQ0QlS8nIRtMSsqRNGxWVFWdSS7J6sgdepGVh9MqjwkKq3649RmEhMG+YC6JP3UHiM8V/fInepSYSLaRJMxXK0zOKsprFmdSqEKs1wJbgSZgb/B9M/LQocaKpIcZsv37YsOs4DJo1rvI56N/HwsJCYa5pbm4uHjx4gEGDBpWrjePHj+P169cKa0vKiwFqDVBXV8eWLVvw6NEjAECbNm2wefPmUrcyIiJFNx68xKCullBVEcnNQ7VpUTTl5mai4m4ZxezNm2Hf6bsKq/wv3XsGVVUVWJs2ZYBKtc6qlREOnbyE/IICuXmod/5KAgBYmhtWy3lamDRD1PqZePYiHWkZWTAz1kVGZja++CYGzvbvdrEplV9tDvF7eHhg48aNSE1NFdaQHD9+HLm5ufD09CxXG4cOHYKZmRnatWv39sol+Net4lcGAwYMwOHDh3HlyhVcuXIFUVFR6Nq1a213i6hOORz3F7QbivG+u/xw0ojubfDk5Sv8cfdZqcc+TclE+9b6CsOXLtZFP/CfvHxV/R0mqqD3Otsh63UOfoq9Kld+4Kc/oK8rQbs2LUo5snIM9BrDupURGmqIER71CzQ1
xBjSt2Kbp9M7UtUV/CJRleaIDB8+HNra2ggICMDp06cRExOD5cuXw9fXV26I/7PPPlPYCQkoeoLn2bNn5Z5yWVHMoBKRUvr5zwc4efkBvprqCW1NMf5KSscgD0u816EFJq89LmRHQ6d3x4jubeA0dRcevsgAAGw4eAWrJnlgzwJvbP/vdbzOyYOngymm9XfEL5cf4trf/78vcXM9bWHvU3PDouHO4qD4wXMpLie8eJeXTfWIp1tbdO5ghc+/3odXWTkwM26Gw79cwukLt7B6/khhD9TPVkch5qc/cHzXfJgY/P++lMdOXQEAPEwqup+v3XkEzYZFW/m8uXXVlj0noacjgZF+E7xMzcDRU1fw85lrWDVvJAz0OMRPiiQSCSIiIhAUFIQZM2ZAQ0MDPj4+CAwMlKtXWFiIgoICheOPHj2K/Pz8Sg/vA4BIJuMugETvQnx8PP5+KsXQry/XdlfqDC0NNSwsftSptgbuPkpFyD8edfrNzB4Y2aMtHCbvxMPnGUK5T8dWCHi/HSxNmkJD3AAPn0ux77e72PjjFWHzfgAY0aMNNsws+dnR/zl5E9NCFRcTUulu7yx9wSMpynydg5Dwozh26jLSMrLQqrk+pozoKfeo03lffocDP/2BE7sXwNTw/wNU654fl9ru7RNfCX9ev/Mn/HD8Dzx9kQ4NdTW0a9sCU0f2hLMDh/cr6uXD22igIqrRKXvx8fF4mPIaMw++fHvlMoT66qK5TsM6O72QASrVinPnzmHs2LElfmZubo5jx47Jlf3xxx8IDQ1FfHy8sLXWihUrYG7+/3sE3r9/H0FBQbh48SIaNmwIb29vBAYGyj0Sddu2bfjxxx/x6NEj5Ofno3nz5hg2bBhGjRqlMN8nPDwcu3fvxosXL2BlZYW5c+dW+FnCb2KASvUBA1T6N3uXAeqsQ6XPsy+Pr3106nSAyiF+qhW2traIioqSK3v16hUmTZoEDw8PufIzZ85gypQpGDp0KKZOnYq8vDxcuXJF7mlVUqkU48aNg7GxMUJDQ5GSkoLg4GCkpaVhzZo1Qr3i5wFbWlpCTU0NZ8+eRVBQEF69eoWpU6cK9cLDwxESEoLZs2fDxsYG0dHRmDRpEqKjo9/J/qtERFS/KeE2qO8UA1SqFY0aNYKjo6Nc2f79+1FYWCj3xIn8/HwsXLgQEydOxJw5c4Tyf64i3LNnD6RSKWJiYoQHK6iqqiIwMBD+/v7CpO5Zs2bJHdepUyc8efIEBw4cEALU3NxcbNy4EWPHjoWfnx8AwNXVFb6+vti0aRNCQkKq50sgIiKiEnEVP73VvHnz4OPjg3PnzmHAgAFwdHTE4MGDce3atWo9z6FDh9CyZUs4ODgIZWfOnMGTJ08watSoMo+NjY2Fu7u7EJwCgJeXF8RiMU6dOlXmsU2bNkVeXp7w/s8//xQyrcVUVVXRr18/nDp1CpwVQ0RENUkEVHkVf11PwDJApXJ58eIFgoKC4Ofnh5CQEGRnZ2P69OlCYFdQUID8/PwyXyWt9CuWnJyMuLg4hef1XrlyBU2aNEF8fDy8vLxgY2ODvn374siRI3L1EhISFJ5uIRaLYWZmprDZMFCUmS1+ykVMTIzcfNji+q1ayS8gsLCwQGZmJp49K317IyIioupQvFNUZV91HYf4qVzS09Oxa9cuWFpaAih6GMGECRNw5coVODs7Y/z48Th//nyZbZiYmJT4eFUAOHLkCAoKChQC1OTkZLx+/RoLFizArFmzYG5ujv3792P27NnQ19eHs7MzgKI5qBKJRKFdiUSC9HT557knJiaid+/ewnt/f3+MHz9eeC+VSiEWi+UWVwFA48ZF27GkpaXB0LB6NtAmIiIiRQxQqVz09fWF4BSAkK0sziYuXboUmZmKj+x7k1gsLvWzgwcPwtbWVm5VPlC0x1pOTg4+/fRT4VGxHTt2xO3bt7F582YhQC2NTCZTWJ1vZGSEvXv3IisrCxcuXMCWLVugoqKCmTNnCnVKeoJH8dB+
bT7dg4iI6gERFB40Upk26jIGqFQu/8xOqqmpAYCwkr5FixZvnZtZWmD34MEDXL16FfPnz1f4rDhr2bFjR7l23Nzc8PPPP8v1TypVfHRlRkZGiUP/xdtuuLm5QVNTE2vWrMGIESOgp6cHiUSCnJwc5OTkQF1dXTiuuP3iPhEREdWU+p4LYYBK1aIqQ/wHDx6EiooK+vbtq/DZP4PLYv/MjFpYWCjMNc3NzcWDBw8waNCgMvtla2uLgoICPH78GHp6esI5ExIS5B7hlpCQAC0tLRgYGJTZHhEREVUNA1SqFlUZ4j98+DBcXV1LDPy6dOmCBg0a4OzZs0LgKJPJcO7cObRp00ao5+HhgY0bNyI1NRVNmzYFABw/fhy5ubkKW1L908WLFyESiWBqagoAcHJygra2No4cOSIEqAUFBTh69Cg8PT05xE9ERDWuvv+sYYBK1eKfK97L68aNG0hISMCECRNK/FxfXx8jR47EV199BZlMhpYtW+LAgQO4d+8eVq5cKdQbPnw4du3ahYCAAAQEBODly5dYuXIlfH19hcA2IyMDkyZNwvvvv48WLVogPz8fcXFxiIyMxLBhw9CsWTMARYG0v78/QkJCoKOjI2zU//DhQ6xdu7ZS10lERFQR9Tw+ZYBKtevgwYMQi8Xw8vIqtc6nn34KLS0tfPvtt0hNTYWlpSU2bdoEW1tboY5EIkFERASCgoIwY8YMaGhowMfHB4GBgUIddXV1mJubY8eOHXj27Bk0NDRgZmaGpUuXYsCAAXLnnDhxImQyGSIjI5GcnAwrKyt8++23fIoUERHROyCScddxonciPj4efz+VYujXl2u7K0Q15vbOD2u7C0Q15uXD22igIqrR59vHx8fjUeprfPqz4sLfivjyPQlMmzas0b7WJGZQiYiIiJRI8ZOkqtpGXcYAlYiIiEjJ1Pc5qHzUKREREREpFWZQiYiIiJSKqBq2marbKVgGqERERETKRFQNQ/x1Oz7lED8RERERKRdmUImIiIiUDJ8kRURERERKpZ7HpxziJyIiIiLlwgwqERERkZLhED8RERERKY2iJ0lVvY26jEP8RERERKRUmEElIiIiUjIc4iciIiIipVLP41MGqERERETKpr5nUDkHlYiIiIiUCjOoRERERMpEVA1D/HU8AcsAlYiIiEjJcIifiIiIiEiJMINKREREpGTqeQKVASoRERGRsuEQPxERERGREmGASkRERKRERCjKoFbpVcU+3L9/H35+fnB0dIS7uzuCgoKQnZ1drmPT0tLw+eefo0uXLrC3t4eXlxf27NlTofNziJ+IiIhIydTmCL9UKsW4ceNgbGyM0NBQpKSkIDg4GGlpaVizZk2Zx2ZmZmLMmDFQV1fHZ599Bl1dXSQmJiIvL69CfWCASkRERESCPXv2QCqVIiYmBjo6OgAAVVVVBAYGwt/fHxYWFqUeu3nzZmRnZyM6OhoaGhoAADc3twr3gUP8REREREqlisP7IhGqslN/bGws3N3dheAUALy8vCAWi3Hq1Kkyj923bx8GDx4sBKeVxQCViIiISJn870lSVXlVZRJqQkKCQpZULBbDzMwMCQkJpR738OFDJCcnQyKRYMqUKbCzs4ObmxuWLl1a7vmrxTjET0RERKRkqmObqaSkJHz00Uelfn7ixIkSy6VSKSQSiUK5RCJBenp6qe0lJycDAFatWoU+ffpgy5YtuHfvHtauXYu8vDwEBQWVu+8MUImIiIjorWQyWZmBc2FhIQDAwsICwcHBAAB3d3fk5+dj1apVmDVrFvT09Mp1LgaoREREREqmOlbxGxkZlZolLYtEIoFUKlUoz8jIKHOBVJMmTQAAHTt2lCvv2LEjCgsLkZCQUO4AlXNQiYiIiJSMikhUpVdVWFhYKMw1zc3NxYMHD8oMUJs3bw41NTWFcplMVnRNKuUPOxmgEhEREZHAw8MDcXFxSE1NFcqOHz+O3NxceHp6lnqcWCxG586dcfbsWbnys2fPokGDBmjd
unW5+8AAlYiIiEiJFD1JqoqvKpx/+PDh0NbWRkBAAE6fPo2YmBgsX74cvr6+chnUzz77DDY2NnLHTps2Dbdv38bcuXPx22+/YceOHQgLC8OoUaPktq16G85BJSIiIlIy1bGKv7IkEgkiIiIQFBSEGTNmQENDAz4+PggMDJSrV1hYiIKCArkyBwcHbN68GV999RWmTp2KJk2aYPTo0Zg1a1aF+sAAlYiIiIjkmJubIzw8vMw6K1euxMqVKxXKO3fujM6dO1fp/AxQiYiIiJSMSu0lUJUCA1QiIiIiJVObQ/zKoFwB6vz588vdoEgkwooVKyrdISIiIiKq38oVoJ47d67cDdb3iJ+IiIioSkTVsFF/HQ/HyhWgnjx5sqb7QURERET/I6rrEWYVcQ4qERERkRIRoeqLpOp6eFvpAPX06dM4f/48UlNTERAQAGNjY1y9ehWmpqYV2oiViIiIiOhNFQ5QX79+jYCAAJw9e1aYbzpixAgYGxtj27ZtMDIywqefflrtHSUiIiKqL+r7mp4KP+o0JCQE165dQ1hYGP744w/IZDLhs86dO+P333+v1g4SERER1TdVfdRpXVfhDOqxY8cwa9Ys9OrVS+HxVsbGxkhKSqq2zhERERFR/VPhADUlJQWtW7cu8TMVFRVkZ2dXuVNERERE9Zeo3j9JqsJD/AYGBrhz506Jn92+fRumpqZV7hQRERFRfVbfh/grHKD27t0bmzZtwo0bN4QykUiEx48fY8eOHejTp0+1dpCIiIiI6pcKD/FPmzYNZ8+exZAhQ2BpaQmRSIT58+fjwYMHMDc3x+TJk2uin0RERET1RtVX8cveXkWJVTiD2qhRI+zZswezZs2CpqYmzMzM0LBhQ0yZMgW7d++GhoZGTfSTiIiIqF6o6vD+v2GYv1Ib9WtoaGDy5MnMlhIRERFRtav0k6RycnJw/fp1pKWloUmTJrC1tYW6unp19o2IiIioXlKp50P8lQpQt2/fjg0bNuDVq1eQyWQQiUTQ0tJCQEAAJk6cWN19JCIiIqpX6vgIfZVVOECNjIzEl19+ic6dO8PHxwfNmjVDcnIyDh48iNWrV6NBgwYYO3ZsTfSViIiIqF6o7486rXCAGhERgffffx+rVq2SKx84cCACAwOxc+dOBqhEREREVGkVXsX//Plz+Pr6lvhZ//798fz58yp3ioiIiKg+UxFV7VXXVTiD2rJlS7x8+bLEz168eIEWLVpUuVNERERE9ZUIVR/ir+sxaoUzqDNnzkRoaKjC405v3bqF9evXY+bMmdXWOSIiIiKqf8qVQZ06darc+4KCAgwYMACtW7eGnp4eXrx4gXv37kFfXx/79+9Hr169aqSzRERERPVBPV8jVb4A9Z/ZUlVVVRgaGuLVq1d49eoVAMDQ0LDEukRERERUMVzFXw4nT56s6X4QEREREQGowpOkiIiIiKhm/BtW4ldFlQLUlJQUZGdnK5QbGxtXpVkiIiKi+ktUDUP8dTzArVSAumHDBkRGRiItLa3Ez2/evFmVPhERERHVa3U8vqyyCm8ztXfvXmzZsgVjxoyBTCbDlClTMHnyZBgaGqJFixYICgqqiX4SERERUT1R4QD1P//5D6ZMmYIpU6YAAHr16oXZs2fj6NGj0NLSQmpqarV3koiIiKi+EAFQEYmq9KrrGdgKB6iJiYlo164dVFSKDs3LywMAaGhoYOLEifj++++rt4dERERE9YxIVLVXXVfhALVBg6JpqyKRCI0aNcLTp0+Fz5o2bYpnz55VX++IiIiIqN6pcIDaokULISi1t7dHdHQ08vLyUFBQgKioKJiYmFR7J4mIiIjqDxFEoqq96voyqwoHqB4eHrhw4QIAYPLkyYiLi4OLiwtcXV3x008/YdKkSdXeSSIiIqL6pLaH+O/fvw8/Pz84OjrC3d0dQUFBJW4t+k9jxoyBtbW1wishIaFC56/wNlPTp08X/uzu7o7vvvsOR44cgUgkgqenJzp27FjRJomIiIhI
SUilUowbNw7GxsYIDQ1FSkoKgoODkZaWhjVr1rz1eCcnJ3z66adyZaamphXqQ5WfJOXg4AAHB4eqNkNERERE/6NSiyud9uzZA6lUipiYGOjo6AAAVFVVERgYCH9/f1hYWJR5vEQigaOjY5X6UOEhfiIiIiKqWbU5xB8bGwt3d3chOAUALy8viMVinDp1qopXVj7lyqCOHTu23A2KRCJERERUukNEREREVHsSEhIwaNAguTKxWAwzM7NyzSU9f/48HB0dUVBQgHbt2mHWrFlwcXGpUB/KFaDKZLJyN1iRukREREQkrygLWrU0qEgEJCUl4aOPPiq1zokTJ0osl0qlkEgkCuUSiQTp6ellntfFxQX9+/dHy5Yt8fz5c4SHh2PChAmIjIxE+/bty93/cgWokZGR5W6QiErXwlCC1Jhptd0NohrT1GX62ysR1VHfBw9GSxPdd3IuZZyDKZPJ3ho4z5w5U+59t27d4OPjgw0bNmDLli3lPleVF0kRERERUfWqagYVAIyMjErNkpZFIpFAKpUqlGdkZLx1gdQ/aWpqwtPTE//9738rdJwyBuhEREREVEssLCwU5prm5ubiwYMHFQ5QgcpN/2SASkRERKRkVERVe1WFh4cH4uLikJqaKpQdP34cubm58PT0rFBbWVlZOHXqFOzt7St0HANUIiIiIiVTmwHq8OHDoa2tjYCAAJw+fRoxMTFYvnw5fH195TKon332GWxsbIT3f/zxB/z9/bF//37ExcXhxx9/xKhRo/DixQtMm1ax9Recg0pEREREAolEgoiICAQFBWHGjBnQ0NCAj48PAgMD5eoVFhaioKBAeK+np4fc3FysXbsWaWlpaNiwIdq3b4+lS5dW+KFOIhn3hSJ6J+Lj4yEDYNW2YsMcRHUJV/HTv1nxKv6KDldXRHx8PFKy8vDjc60qtfO+fiZ0NNVqtK81qdIZ1ISEBFy4cAGpqakYPHgw9PT08OzZMzRu3BgaGhrV2UciIiKieqWqw/R1XYUD1IKCAixatAgHDhwQ9sPy8PCAnp4elixZgrZt22LWrFk10VciIiIiqgcqvEhq48aNOHToEObOnYtDhw7JbR3QtWtXnD59ulo7SERERFTfFD1NqvKvuq7CGdQDBw4gICAAEyZMkJsYCwCmpqZ49OhRtXWOiIiIqD5S+TdEmVVQ4Qzqs2fP4OjoWOJn6urqyMzMrGqfiIiIiKgeq3CAqquri4cPH5b42f3792FoaFjlThERERHVVyIUBWhVedX1/GuFA1RPT09s2rQJz549E8pEIhEyMjIQGRmJ7t27V2sHiYiIiOqVKs4/FYlQ5yPUCs9BnTlzJmJjY9GvXz+4ublBJBJh7dq1uHv3Lho0aICAgICa6CcRERFRvcE5qBXUrFkz7N27F97e3rh+/TpUVVVx69YteHh4YM+ePWjSpEkNdJOIiIiI6otKbdTfrFkzLFu2rLr7QkRERET4d2wVVRWVfpIUEREREdUMPkmqgubPn1/m5yKRCCtWrKh0h4iIiIiofqtwgHru3DmFsrS0NGRlZUEikUBbW7taOkZERERUH4lQ9UVSdT0BW+EA9eTJkyWWnz17FkuXLsXXX39d5U4RERER1Wf1fQ5qhVfxl8bd3R2jR4/GF198UV1NEhEREVE9VK2LpCwsLBAfH1+dTRIRERHVO1wkVY0uXLiApk2bVmeTRERERPWMCKIqzyKt2xFuhQPU9evXK5Tl5eXh9u3biI2NhZ+fX7V0jIiIiIjqp2oJUMViMUxMTDBz5kwGqERERERVIaqGIf66nUCteIB669atmugHEREREaF4m6mqt1GXVWgVf3Z2Nj7++GP88ccfNdUfIiIionpPJBJV6VXXVShA1dDQwIkTJyCTyWqqP0RERERUz1V4H9Q2bdrgzp07NdEXIiIiIkLREH9VXnVdhQPUwMBAhIeH4/z58zXRHyIiIqJ6TySq2quuK9ciqQsXLsDGxgZaWlpYunQpMjMzMW7cOEgk
Eujr68vVFYlE+PHHH2uks0RERET071euAHXs2LGIioqCg4MDmjRpgiZNmtRwt4iIiIjqL5V/Qxq0CsoVoL65KCoyMrLGOkNERERU33GbqUrMQSUiIiIiqkkV3qifiIiIiGpWPR/hL3+AOm7cuHJt/CoSiXDx4sUqdYqIiIioPlOp84P0VVPuANXV1RU6Ojo12RciIiIiovIHqNOmTYODg0NN9oWIiIiIqmMv0zqegOUcVCIiIiIl8294GlRVMEAlIiIiUiJF20xVLUKt6/Ett5kiIiIiIjn379+Hn58fHB0d4e7ujqCgIGRnZ1eojePHj8Pa2ho+Pj4VPn+5Mqi3bt2qcMNEREREVDm1uc2UVCrFuHHjYGxsjNDQUKSkpCA4OBhpaWlYs2ZNudrIzs5GcHAwmjVrVqk+cIifiIiISMnU5qNO9+zZA6lUipiYGGEHJ1VVVQQGBsLf3x8WFhZvbWPz5s0wNjaGqakprl27VuE+cIifiIiIiASxsbFwd3eX217Uy8sLYrEYp06deuvxDx48wPbt27Fw4cJK94EBKhEREZGSEYmq9qqKhIQEhSypWCyGmZkZEhIS3nr8F198gf79+6NNmzaV7gOH+ImIiIiUiAhVzyCKACQlJeGjjz4qtc6JEydKLJdKpZBIJArlEokE6enpZZ735MmTuHTpEo4dO1aR7ipgBpWIiIiI3komk5X52PucnBysWLECM2bMqPLTR5lBJSIiIlIyZQWC5WVkZFRqlrQsEokEUqlUoTwjI6PMBVIRERFQUVGBt7e3cHxeXh4KCwshlUqhoaEBsVhcrj4wQCUiIiJSMrW50b6FhYXCXNPc3Fw8ePAAgwYNKvW4v/76C4mJiXB3d1f4zMXFBZ9//jlGjBhRrj4wQCUiIiIigYeHBzZu3IjU1FQ0bdoUQNGm+7m5ufD09Cz1uEmTJmHgwIFyZd9++y3u37+P4OBgtGzZstx9YIBKREREpExEIqhUOYda+eOHDx+OXbt2ISAgAAEBAXj58iVWrlwJX19fuSH+zz77DDExMbhx4waAoszrP6cAHDhwAM+ePYObm1uF+sAAlYiIiEjJ1OYQv0QiQUREBIKCgjBjxgxoaGjAx8cHgYGBcvUKCwtRUFBQI30QyWQyWY20TERy4uPjIQNg1da+trtCVGOaukyv7S4Q1ZjvgwejpYku7O1r7t/x+Ph4vMotwH1R5R4RWsxcloxGYtUa7WtN4jZTRERERKRUOMRPREREpGSqvM1UHR8fZ4BKREREpESq60lSdRmH+ImIiIhIqTCDSkRERKRkquNJUnUZA1QiIiIiJVO/w1MO8RMRERGRkmEGlYiIiEjJcIifiIiIiJRKfR/iru/XT0RERERKhhlUIiIiIiUiQtWH+Ov6BAEGqERERERKpq4HmFXFAJWIiIhIydTzNVKcg0pEREREyoUZVCIiIiIlo1LPB/kZoBIREREpGQ7xExEREREpEWZQiYiIiJSMiEP8RERERKQ0RNUwxF/H41sO8RMRERGRUmEGlYiIiEiJiFD1Vfx1PIHKAJWIiIhI2XAVPxERERGREmEGlYiIiEjJ1PcMKgNUIiIiIqUiqoZtpup2hFvrAWpYWBjWr18PABCJRNDS0oKxsTFcXFwwatQoWFhY1HIPa865c+cwduxY7N27F/b29rXdnUp59OgRevbsia+//hp9+vQBAOzYsQPm5ubw9PSUqztmzBhoampi8+bNNdqnN++p0piYmODkyZOYN28eDhw4oPB5ly5dEB4eXlNdJCIiKpNK3Y4vq6zWA1QA0NDQQEREBAAgMzMTd+7cQVRUFL7//nt88cUX6N+/fy33sGbY2toiKiqqTgfh+vr6iIqKQsuWLYWynTt3olu3bgoB6pIlS6CiUvPTnocMGYKuXbsK76Ojo3Ho0CHhHgMAsVgs/Ll58+ZYs2aNXBva2to13k8iIiIqmVIEqCoqKnB0dBTed+7cGSNHjsTkyZOxYMECODk5oXnz5rXX
wRrSqFEjueuui8RicbmvoXXr1jXbmf8xNDSEoaGh8P706dMK99ibNDQ06vzfAxER/XuIUPUnSdX1BKzSruJXV1fHokWLkJeXh+joaKG8sLAQmzZtQo8ePWBnZ4fevXtjx44dcsc+ffoUs2bNQqdOnWBvb48ePXpgxYoVwucJCQmYPXs2PD090a5dO/Tr1w/btm1DYWGhQjtTpkyBg4MDunbtiq1bt2LZsmXo0aOHQr3AwEC4ubnBwcEBo0aNwrVr1956jefOnYO1tTXi4+OFMmtra2zZsgWhoaHo1KkT3NzcMH/+fGRlZQl1pFIpFi5ciK5du8Le3h6enp6YPXt2hfvUo0cPLFu2DLt27UL37t3RoUMHBAQEICUlRaiTl5eHL7/8Et27d4ednR26dOmCqVOnIiMjA0DREL+1tTWOHTsmtPn48WPs3r0b1tbWsLa2xv79+wEUDfFPmTJF7tr/2aeCggJ06tQJq1atkvv78vf3R4cOHeDo6IjJkyfjwYMHb/1+q8vevXvh7e0NBwcHuLm5YcSIEbh69eo7Oz8REdU/IlHVXnWdUmRQS9O6dWsYGBjg0qVLQtmqVasQERGBKVOmwNnZGWfOnEFwcDAyMzMxbdo0AMDcuXPx/PlzLFy4ELq6ukhKSpILhJ4/fw5zc3P4+vpCS0sLN2/eRFhYGLKysjB9+nQAgEwmQ0BAAJKTk7Fs2TJoa2tj69atePLkCVRVVYW20tPTMXLkSGhqamLRokXQ1tZGZGQkxo0bh59++gm6uroVvu7du3ejQ4cOWLlyJe7fv4/Vq1dDV1cXgYGBAIDg4GCcPn0aH3/8MUxMTPDixQvExsZWqk8nT55EYmIiFi9ejNTUVKxYsQLLly9HSEgIAGDz5s3Ys2cPAgMDYWlpidTUVJw5cwa5ubkl9n39+vWYPHkynJycMHHiRACAmZmZQj0XFxcYGBjg8OHDsLOzE8rj4uLw8uVL+Pj4AAAePnyI4cOHw9LSEitXroRIJMKmTZswfvx4HDt2TG6oviry8/Pl3jdoUPSfxoULF7BgwQJMnDgRnp6eyM7OxtWrV4UAnYiIiKqfUgeoAGBkZITk5GQAQEpKCnbt2oUJEybgo48+AlC0mCUzMxNbt27F+PHjoaWlhfj4eMyZMwf9+vUT2hkwYIDwZ3d3d7i7uwMoCkQ7dOiA7Oxs7Nq1SwhQY2Njcf36dezevRvOzs4AADc3N3h4eKBJkyZCWxEREZBKpYiOjhYCP3d3d/Tq1Qvh4eGYO3duha+5WbNm+OqrrwAAHh4eiI+Px3//+18hQI2Pj4ePjw8GDhwoHOPt7V2pPslkMmzcuFEI9BITExEeHo7CwkKoqKggPj4eXbp0wahRo4RjvLy8Su27jY0NxGIxmjVrVuawuYqKCvr164cjR45g7ty5EP3v171Dhw7B3NwcNjY2AIoCXolEgu3bt0NdXR0A4OTkhJ49eyI6OlquX5V19+5d2NraypUV/71fvXoVTZo0waeffip81q1btyqfk4iIqCxVX8Vftyl9gCqTyYTg5erVq8jLy5MLPIGi4CwqKgo3b96Es7MzbGxssG3bNqiqqqJz585o0aKFXP2cnBxs3rwZBw8eRFJSEvLy8oTPMjMzhSBXIpEIwSlQNGfUzc0Nt2/fFsrOnDkDNzc3NG7cWMjCqaiowNnZWRi6LywslJs+oKKiUuZioc6dO8u9b926Nf773/8K721sbHDgwAHo6emha9eusLKykqtfnj4Vc3FxkctCtm7dGnl5eXj58iX09PRgY2OD8PBwhIWFwdPTE3Z2dtW20Mnb2xvbt2/HxYsX4ezsjNzcXPz8888YN26c3LX069cPqqqqwrVIJJISpwdUlpmZGdauXStX1qpVKwBF33VaWhrmzZsHX19fODk5oWHDhtVyXnq7V1k5+GLjQcT8/CdSpVmwbGGAj8b3wqDezmUe9/hZKtbvOoGrtx/h2t3HkL56jW8Wj8ZI344KdY+djkfMz5dw
9fZD3P37GfILCpF6oexdIIiqi1ZDMRb4+2LAe05oKtHE3cRnWLfjOPYfv/jWY7t0sMScCV6wszRBQw0xEh8nY+cPv2NrdCwKC2VCPbUGqgj064NhfV1gpN8Ez5Kl2PvfP7Bq61Fk5+SVcQaqTVzFr+SePn0qrBBPT08HAOjp6cnVadasGQAgLS0NABASEoKQkBCsW7cOS5cuhbm5OebMmYPevXsDAFavXo3o6GhMmzYNdnZ20NbWxokTJ7Bx40bk5ORAS0sLz58/h46OjkJ//jlkn5qaisuXLytk4ID/H9r+5ptv5LY9mj59OmbMmFHqNUskErn3ampqckPqixYtQuPGjbF9+3asWrUKRkZGmDx5MkaOHFnuPpV1LqAoiAcAf39/qKio4MCBA1i/fj10dHQwatQoTJs2TfjFobLs7e3RsmVLHDp0CM7OzoiNjYVUKhWG94uvJSIiQm4FfjENDY0qnb+Yurp6qdt8ubu7Y9WqVdi5cyf8/Pygrq4OLy8vfPbZZ3KZdKoZY+duwZ83ErFken+0NtPH3mN/4MMFO1BYKMOQPi6lHnf/0QtEH/sD9lYm6NXZBvv+W/oP+8O/XsUf1+7Dwao51MUNcPnmw5q4FKIS7Vw1CU42LbB0/Q+49+A5BvdxRviKCVBREWHvf/8o9ThPV2vsC52G3y/dw6wv/oOs7Fz07WqPLwOHwNy0GeZ/tU+ou/WLCejVyQarw4/hzxuJcLE3R+BEL7RpZYSRH9fstn9ElaXUAerdu3fx7NkzYSi7OCBITk6GgYGBUK94CkDx5/r6+ggODkZhYSGuXbuGjRs3Yvbs2Th27BiaN2+OY8eOYdiwYZg8ebLQxqlTp+TOra+vL7dYqNjLly/l3jdu3Bhdu3bFrFmzFOoWZyaHDh0qNyysr69fzm+gZNra2liwYAEWLFiA27dvY+fOnVi6dCksLS3h4uJSrj6Vl1gsxowZMzBjxgwkJiZi3759CAsLg6mpqdy0icry9vbGd999h4ULF+LIkSOwtbWV27KqcePG8PT0FILvN2lpaVX5/OXRv39/9O/fHykpKThx4gSCg4PRoEEDuYV3VP1+OnMdv5y7hS1B4zHYqyhj2tXZCg+fpmBJaAw+6NUBqqolZ/M7tW+Ne8dXAgAu3UgsM0D9esEIYVTgk1XfM0Cld6ZXJxv06NgWHy7Yjn0/Fd2jv128i+aGOlg6cwD2H78olwl900gfN+TlF2D47E3Iyi5KYJw6fxutWxhgpE9HIUB1tmuJ93s4YkHIfmz4z0mhXkFBIRZPex/dXNvg1/O33sHVUkXV9yF+pV3Fn5OTg+XLl0MsFmPIkCEAijJuampqOHr0qFzdI0eOQFNTU5i3WExFRQUODg746KOPkJ+fj8TERKHt4kwhULRy/PDhw3LH2tvbQyqV4sKFC0LZq1evcO7cObl6nTp1QkJCAiwsLGBvby/3sra2BgAYGBjIlb8ZXFeVtbU15s+fDwD466+/yt2nymjRogXmzJmDJk2aCOcqiZqampCBfRtvb2+kpKTgl19+wS+//AJfX1+5z93d3XH37l3Y2NgoXEvxMPy7oqOjgyFDhqBz585lXj9Vj8O/XEEjTXUM6Nlernykb0ckvUjHH9f+LvXYikxDeRd78xKVxLt7O2RkZiPmxCW58v8cjIOxfhM427Us9di8/ELk5uXj9T+G6NNfZckN27u1K/p38viZ63L1/nu6aIrU+z0cq3AFVJO4il8JFBYW4vLlywCArKwsYaP+hw8fYuXKlTA1NQVQFCCMGTMG27Ztg1gshpOTE86ePYuoqCjMmDEDmpqayMjIgJ+fH/r37w9zc3Pk5eUhMjISEolECGA7deqE6OhotG7dGjo6Oti9e7fCqnQPDw/Y2tri448/xpw5cyCRSLBlyxZoa2vLDW2PHz8eBw8exOjRozF27FgYGxsjJSUFV65cgYGBAcaPH1/t39fw4cPRq1cvWFpaQlVVFTExMVBTUxPmy1ZnnwIC
AmBrawsbGxs0bNgQv/zyC9LS0tCxo+JcvmKtWrVCXFwczpw5A4lEAlNTUzRt2rTEuhYWFrCxscGyZcvw+vVrhfnFM2fOxODBg+Hn54ehQ4eiWbNmSE5Oxvnz5+Hs7Cw3HaAmhIaGIi0tDa6urtDV1cWdO3dw+vTpGvl7JXk3/3oCq5aGaNBAVa7ctrVJ0ecJT4QfvkR1UdtWxrjz91MUFMhvcXj93uOizy2Mcf7q/RKP3b7vNAb17oAvAwdj7faf/jfEbwefbu2w7JsfhXpitaIf8zl58juVFL+3tTSutuuhf5f79+8jKCgIFy9eRMOGDeHt7Y3AwMC3Tq9bvXo1fv31Vzx58gQikQjm5uaYOHGi3GLu8lCKADU7OxvDhg2DSCSCpqYmTExM4O7ujvXr1ys8ZemTTz6BRCJBdHQ0vv32WxgZGWHevHlCwKCurg4rKytERkYiKSkJGhoasLOzQ3h4uDCndNGiRViyZAmWL1+Ohg0bYuDAgejVqxcWLlwonEckEmHDhg1YvHgxFi9eDIlEgrFjx+Lu3bu4e/euUK9p06aIiorCunXrsGbNGqSlpUFXVxft2rVDr169auT7cnJyQkxMDB49egQVFRVYWVlh06ZNwndVnX1ycnLC0aNHsX37dhQUFMDc3BxfffUVOnXqVOoxc+bMweeff44ZM2YgMzMTwcHB+OCDD0qt7+3tjdWrV8PV1VUhu9yiRQtER0cL84mzsrKgp6cHFxeXKmWDy8ve3h4RERE4evQoXr16BUNDQ/j5+cHf37/Gz13fpaRnoqVxM4Xypo01hc+J6jKdxlr4+0myQnlqepbweWkuXk9E/4BQbA/2w6ShRU/ty88vwLJvfsQ3u08K9W79lQQA6NiuFR48+f8pah3/98td0zLOQbWrNpOgUqkU48aNg7GxMUJDQ5GSkoLg4GCkpaUpPHnxn16/fo3hw4fD3NwcMpkM//3vfzFnzhwUFhYqjJKWRSSTyUqe4EIKcnNz0bdvX7i6uiI4OLi2u0N1THx8PGQArNqWvCCL5DkPWoqWJnrYGxogV/40OR1t+y7A4mnvY/b43m9t59KNRPQYt7rUVfxv+mTV99gaHctV/FXQ1GV6bXehzriwdzH+fvwCQ2ZtlCs30JXg1rEVWLr+B6yLOF7ise3aNMf36/xx8XoiIg6cQebrHHi4WGHmmPewZtt/sSa86OEpag1UcTZqARpqiDFt6a7/LZJqiW+WjIFuYy0kPHyBjkODavxa/y2+Dx6Mlia6pS6srQ7x8fHIyStEnqTF2yuXQU2aCHU1lUr19dtvv8WGDRtw8uRJIbl38OBBBAYG4siRIxV+RPvw4cOhqamJbdu2lfsYpcigKquoqCgUFhbC3NwcUqkU3333HZKSkkpcsENE1UunsRZSS8iSFmeXmko033WXiKpVSnpmiRnM4lGCVGmWwmfFVs8dihcpGRj9ybfCQqrfLt5FYaEM8yb1Q/SxC0h8/BJ5+QUYMmsDNi0dhwPfFP3y8CorB8s3/IhP/Pog6UVa9V8YVYvazKDGxsbC3d1dbjej4h1sTp06VeEAtUmTJsjMrNioFwPUMqirq2PLli149OgRAKBNmzbYvHlzjf7mRERFbCyMse+ni8jPL5Cbh3oj4f/n5xHVZTcSnmBQ76LdKN6ch2pj8f/zrEtjb2WKfT8prvK/dOMBVFVVYN3SEImPi4b07z9KhpffVzDSa4ymEi3cf/QCkkYN8WXgEPx+6V4NXBnVdQkJCRg0aJBcmVgshpmZGRISEt56vEwmQ0FBAbKysnDy5EmcOXMGq1evrlAfGKCWYcCAAdWylRIRVZx3t3aIiPkdP568jA96dxDKvzt0HkZ6jctc4UxUFxz+9QrGD+yM93s44sDxP4XyET6uePI8rcydKp4mp6N9WzOoqIjkglQXe3MAwJPnaQrHJL1IR9KLov3EF/j74lVWDnb9cLZ6LoaqlwhVT6GKgKSkJOHJmyU5ceJEieVSqVRhn3Sg
aO/04j3py3L27FlMmDABQNGjwxctWoQ+ffqUr9//wwCViJRSr8626O7WBh9/GYWMzGy0aq6Hff/9AyfO3sDmZeOEPVBnLN+N7w6fw58HPoeZ0f8PR/3wv617/n5ctAjl0s0H0NIselxu/ze2rnqQlIJLN4q2oLv/KFnuWDMjHbS3qdo8MKLS/Pz7DZyMu4mvPh0GbS0N/PXwBQZ5OeO9TraYvGiHEHiGLhyJEd5ucBr4OR4+TQUAbPjPL1j1yRDsWTsV2/f/htfZefB0tcK0UT3xy7lbuHb3sXCemWPew7OXUjx6mgp9XW0MeM8J3p4OmLpkpxCwkvJRxn1Q33y6Z1kcHBywd+9evHr1CrGxsVi+fDlUVVWFbUPLgwEq1ZqUlBSsW7cOp0+fRkpKCkxMTDBs2DCMHTtW+A/g8ePHWL58OW7cuIHU1FQ0btwYTk5OmD17NszNzeXaK++WGKdOnUJISAgSEhJgaGiI8ePHY9SoUXJ1du/ejdjYWFy5cgWpqan4+uuvK/zbH1XdzlWTELThIII3Hy561GlLA2z9Yrzco04LCgqLhkf/sd5z/Lxwufdbo2OxNToWAOQWQf32xx1MW7arxGNHeLthw+djqvWaiN40du4WLAzwxfwp3kWPOv37Gfw+2y73qFNVVZWiaS5vBAZbvj+FpBdpCBjRHaELR0JDXYyHSS/x5ZYj2PifX+TOoa7eAHM/7Atj/SbIzsnDH9fuw3fq1zh7+e1DtVS3GRkZlZolLYtEIoFUKlUoz8jIKNf800aNGgnTId3d3ZGbm4uVK1figw8+gKqq6luOLsJV/FRrRo0ahcTERMyePRvGxsb4/fff8e2332LevHnC0MDdu3exc+dOYQuq58+fY/PmzUhLS8MPP/wgTOAufkSqsbExAgIChC0xunbtKrclxqVLlzB69Gj0798f77//Pv7880+EhYVh2bJlcr/ZDR06FABgbm6OmJiYaglQuYqf6gOu4qd/s3e2ij+/EIWNW1apHZX0v6HeoHKr+EePHg1tbW1s3Pj/O0zk5uaiQ4cOmD17NiZOnFih9vbv34/58+fjt99+U3hcfWmYQaVa8fTpU/zxxx9YsWKFMBHb3d0dt27dwpEjR4QA1dLSEsuXL5c71s7ODl5eXjhz5oywp9qePXsglUoRExMjBK2qqqoIDAyEv7+/8BvfN998AxsbG+ExpR07dkRSUhK+/vprDBo0SHiq0J49e6CiooJHjx4hJiamxr8PIiKiN9XmAL+Hhwc2btyI1NRU4UE7x48fR25uLjw9PSvc3sWLF9GoUaNSH9pTEj7jj95q3rx58PHxwblz5zBgwAA4Ojpi8ODBuHbtWqXbzMsrehSftra2XLlEIsHbkvpNmjQBAOTn//+TUUrbEkMsFuPUqVMAin77i4uLU3iaha+vL168eIEbN24IZXz8JRER1VfDhw+HtrY2AgICcPr0acTExGD58uXw9fWVG+L/7LPP5B4zf+vWLXz44YfYu3cvzp49ixMnTmDhwoXYu3cvpkyZggYNyp8XZQaVyuXFixcICgrC5MmT0ahRI3z11VeYPn06jh8/DjU1NRQUFLw1sBSJRMLck+bNm6Nz587YtGkTWrZsCWNjY5w9exbHjx/HsmXLFI4tLCxEQUEBnj17hpCQEBgZGeG9994TPi/PlhgPHjxAXl4eWrWSfzxm69athTbs7Owq/uUQERFVt1pMoUokEkRERCAoKAgzZsyAhoYGfHx8EBgYKFev+GdzsWbNmkEikWDDhg148eIFtLW10apVK3zzzTdyP7PLgwEqlUt6ejp27doFS0tLAEV7xE6YMAFXrlyBs7Mzxo8fj/Pnz5fZhomJCU6e/P9H8K1fvx6zZ88WhulFIhE++eSTErf2mjt3Lg4ePAgAMDMzw/bt2+Wyr+XZEqP4//9Zr/h9ebbOICIiehdqexW/ubk5wsPDy6yzcuVKrFy5UnjfrFkzrF27tlrOzwCVykVfX18ITgEIKf5n
z54BAJYuXfrWp0SIxWLhzzKZDPPnz8fff/+NNWvWwMDAAOfPn0dISAgkEonCVhSzZs3C2LFjkZSUhB07dmDChAn4z3/+A2PjsjdrL2lLjNK2yCjP1hlERERU8xigUrn8M+uopqYGAMjJyQEAtGjRolxD/MV+/fVXHDt2DD/88APatGkDAHB1dYVUKsWqVavkFiwBRVMCmjdvDgcHB3Tt2hW9evXC1q1bsXjxYqF/b9sSo3HjxgAUM6XFx5WUgSUiInrXRML/1F8MUKlaVHSI/969e1BVVYW1tbVcnbZt2yIiIgJpaWlyC57epKmpiVatWiExMVEos7CwUHj8Wm5uLh48eCDMTTUzM4Oamhr++usveHh4CPXu3bsntEFERKQMqhqf1vU9RBmgUrWo6BC/iYkJCgoKcPPmTbkVgNeuXYOmpmaZW1FIpVLcuXMHffv2FcrKsyWGWCxGx44dcfToUYwfP1449tChQ9DT05PrBxERUa1iBpWo6v65Mv5tPD09YWJiglmzZmHatGkwMDBAXFwcvvvuO0ycOFGYDhAWFoaMjAw4OTlBR0cHjx8/RkREBPLz8zFu3DihveHDh2PXrl0ICAhAQEAAXr58iZUrVypsiTFt2jSMHj0aCxcuhK+vL/78809ER0dj2bJlclMK4uPj8fjxY6SkpAAArly5AgDQ0dGBq6trpb8nIiIiejsGqFQrtLS0EBERgZCQEKxduxbp6ekwNTXFnDlz5AJPGxsb7NixAz/88AOysrJgYGAAFxcXhIWFoXnz5kK98m6J0b59e2zYsAFr165FTEwMDA0NsXDhQoVFWbt378aBAweE99u2bQNQNE82MjKyJr4SIiKi/xFVeRW/rI6nYPmoU6J3hI86pfqAjzqlf7N39ajT3HwZVHXNq9ROwcv7EDcQ1WhfaxIfl0NERERESoVD/ERERERKpm4P0FcdA1QiIiIiZVPPI1QO8RMRERGRUmEGlYiIiEjJVHUVf13HAJWIiIhImYgAUVXj0zoe33KIn4iIiIiUCjOoREREREqmjidAq4wBKhEREZGyqecRKgNUIiIiIiUiQtUXSdX1+JZzUImIiIhIqTCDSkRERKRkqryKv45jgEpERESkZOp5fMohfiIiIiJSLsygEhERESmbep5CZYBKREREpGTq+6NOOcRPREREREqFGVQiIiIiJcNV/ERERESkVOp5fMohfiIiIiJSLsygEhERESmbep5CZYBKREREpGTq+yp+BqhERERESqa+L5LiHFQiIiIiUirMoBIREREpERGqPgW1ridgGaASERERKZu6HmFWEYf4iYiIiEipMINKREREpGRqexX//fv3ERQUhIsXL6Jhw4bw9vZGYGAgNDQ0Sj3m1atX2L59O2JjY3H//n00aNAAtra2mDNnDmxtbSt0fmZQiYiIiJSJqGgVf1VeVYlvpVIpxo0bh8zMTISGhuLTTz/FwYMHsXDhwjKPe/LkCaKiotCpUyeEhIQgODgYhYWFGD58OK5fv16hPjCDSkRERESCPXv2QCqVIiYmBjo6OgAAVVVVBAYGwt/fHxYWFiUeZ2pqiuPHj6Nhw4ZCWadOndCzZ0/s2rULwcHB5e4DM6hERERESkZUxVdVxMbGwt3dXQhOAcDLywtisRinTp0q9ThNTU254BQA1NXVYWFhgefPn1eoDwxQiYiIiJRNLUaoCQkJCllSsVgMMzMzJCQkVKitrKws3Lx5E61atarQcRziJyIiIvoXSkpKwkcffVTq5ydOnCixXCqVQiKRKJRLJBKkp6dXqA/r1q3D69evMXr06AodxwCViIiISMnU9ir+kshkMogq8AzWgwcPIiIiAosXL0aLFi0qdC4GqERERERKpgJxYKmMjIxKzZKWRSKRQCqVKpRnZGSUukDqn86cOYP58+fDz88Po0aNqnAfOAeViIiISMnU5iIpCwsLhbmmubm5ePDgQbkC1KtXr2L69Ono06cPPvnkk0r1gQEqEREREQk8
PDwQFxeH1NRUoez48ePIzc2Fp6dnmccmJCRg0qRJcHJyQnBwcIWmBLyJASoRERGREhGh6hv1VyWLOnz4cGhrayMgIACnT59GTEwMli9fDl9fX7kM6meffQYbGxvh/cuXL+Hn5wc1NTV8+OGHuH79Oi5fvozLly/jxo0bFeoD56ASERERKZ3aWyQlkUgQERGBoKAgzJgxAxoaGvDx8UFgYKBcvcLCQhQUFAjv7927h6SkJADA+PHj5eqamJjg5MmT5e4DA1QiIiIikmNubo7w8PAy66xcuRIrV64U3ru5ueH27dvVcn4GqERERERKpjpW8ddlDFCJiIiIlEw9j0+5SIqIiIiIlAszqERERERKhkP8RERERKRERNXwqNO6HeFyiJ+IiIiIlAozqERERETKpm4nQKuMASoRERGRkqnn8SkDVCIiIiKlIqqGRVJ1PMLlHFQiIiIiUirMoBIREREpmaqv4q/bGKASERERKZv6HZ9yiJ+IiIiIlAszqERERERKRISqJ1DregKWASoRERGRkqnvjzrlED8RERERKRVmUImIiIiUDFfxExEREZFS4RA/EREREZESYYBKREREREqFQ/xERERESqa+D/EzQCUiIiJSMvV9kRSH+ImIiIhIqTCDSkRERKRkOMRPREREREqDjzrlED8RERERKRlmUImIiIiUTV1PgVYRA1QiIiIiJcNV/ERERERESoQZVCIiIiIlw1X8RERERKRU6nl8yiF+IiIiIlIuzKASERERKRNuhMoAlYiIiEjZ1PdV/AxQiYiIiJQMF0kR0TuRl5cHmUyGOzfja7srRDXm++DBtd0Fohqjr6OFvLy8Gj9PXm5ulX9W5OXmQiwWV1OP3j0GqETviOh/vw7X81+K6V+upYlubXeBqMbk5eUJ/5bXlOoKKsVicZ0OUEUymUxW250gIiIiIirGbaaIiIiISKkwQCUiIiIipcIAlYiIiIiUCgNUIiIiIlIqDFCJiIiISKkwQCUiIiIipcIAlYiIiIiUCgNUIiIiIlIqDFCJiIiISKkwQCUiIiIipcIAlYiIiIiUSoPa7gARlS4sLAzr168HAIhEImhpacHY2BguLi4YNWoULCwsarmH9cOjR4/Qs2dPfP311+jTpw8AYMeOHTA3N4enp6dc3TFjxkBTUxObN2+u0T69eW+UxsTEBCdPnsS8efNw4MABhc+7dOmC8PDwmuqioD7fx+fOncPYsWOxd+9e2Nvb13Z3KoX3P9UGBqhESk5DQwMREREAgMzMTNy5cwdRUVH4/vvv8cUXX6B///613MN/P319fURFRaFly5ZC2c6dO9GtWzeFH9BLliyBikrND04NGTIEXbt2Fd5HR0fj0KFDwr0CAGKxWPhz8+bNsWbNGrk2tLW1a7yfxerrfWxra4uoqKg6HYTz/qfawACVSMmpqKjA0dFReN+5c2eMHDkSkydPxoIFC+Dk5ITmzZvXXgfrAbFYLPd3UJbWrVvXbGf+x9DQEIaGhsL706dPK9wrb9LQ0Cj3NdSE+nofN2rUqFa/9+rA+59qA+egEtVB6urqWLRoEfLy8hAdHS2UFxYWYtOmTejRowfs7OzQu3dv7NixQ+7Yp0+fYtasWejUqRPs7e3Ro0cPrFixQvg8ISEBs2fPhqenJ9q1a4d+/fph27ZtKCwsVGhnypQpcHBwQNeuXbF161YsW7YMPXr0UKgXGBgINzc3ODg4YNSoUbh27dpbr1EqlWLhwoXo2rUr7O3t4enpidmzZ1e47R49emDZsmXYtWsXunfvjg4dOiAgIAApKSlCnby8PHz55Zfo3r077Ozs0KVLF0ydOhUZGRkAioY4ra2tcezYMaHNx48fY/fu3bC2toa1tTX2798PoGiIc8qUKQCKhnetra0V+lRQUIBOnTph1apVct+7v78/OnToAEdHR0yePBkPHjx46/dUXfbu3Qtvb284ODjAzc0NI0aMwNWrV2v0nPXhPi6+B+Lj44Uya2trbNmyBaGh
oejUqRPc3Nwwf/58ZGVlCXV4///7738qGzOoRHVU69atYWBggEuXLgllq1atQkREBKZMmQJnZ2ecOXMGwcHByMzMxLRp0wAAc+fOxfPnz7Fw4ULo6uoiKSlJ7gfI8+fPYW5uDl9fX2hpaeHmzZsICwtDVlYWpk+fDgCQyWQICAhAcnIyli1bBm1tbWzduhVPnjyBqqqq0FZ6ejpGjhwJTU1NLFq0CNra2oiMjMS4cePw008/QVdXt9TrCw4OxunTp/Hxxx/DxMQEL168QGxsbKXaPnnyJBITE7F48WKkpqZixYoVWL58OUJCQgAAmzdvxp49exAYGAhLS0ukpqbizJkzyM3NLbFv69evx+TJk+Hk5ISJEycCAMzMzBTqubi4wMDAAIcPH4adnZ1QHhcXh5cvX8LHxwcA8PDhQwwfPhyWlpZYuXIlRCIRNm3ahPHjx+PYsWNyQ5VVkZ+fL/e+QYOiHwEXLlzAggULMHHiRHh6eiI7OxtXr14VApSa9G+/j0uze/dudOjQAStXrsT9+/exevVq6OrqIjAwEADv//py/1MZZESktEJDQ2WOjo6lfj506FBZnz59ZDKZTPby5UuZra2t7Msvv5Srs2jRIpmjo6Ps1atXMplMJnN0dJTt3LmzXOcvLCyU5eXlyTZu3Cjr3LmzUP7rr7/KrKysZBcuXBDKMjIyZO3bt5d1795dKPv6669lHTp0kCUnJwtlOTk5Mg8PD4V+/pO3t7csODi41M/L23b37t1lHh4espycHKFs7dq1MltbW1lBQYFMJpPJJk+eLJs+fXqp53r48KHMyspKdvToUbl2ly5dqlB39OjRssmTJwvvg4ODZR4eHrLCwkKhbN68eTIvLy/h/dy5c2U9evSQZWdnC2UvX76UOTo6ynbt2lVqv95U1r3y6aefyqysrBRexX9/W7dulbm6upbrPJVRn+/juLg4mZWVlezq1atCmZWVlWzQoEFy9T7++GPZe++9J7zn/f/vuf+pcjjET1SHyWQyiEQiAMDVq1eRl5eHfv36ydXx9vZGVlYWbt68CQCwsbHBtm3b8J///AeJiYkKbebk5CA0NBS9evWCvb09bG1tERISghcvXiAzMxMAEB8fD4lEAmdnZ+G4Ro0awc3NTa6tM2fOwM3NDY0bN0Z+fj7y8/OhoqICZ2dnYcizsLBQ+Cw/P18YgrWxscGBAwcQHh6OO3fuKPSzPG0Xc3FxkcvCtG7dGnl5eXj58qVwrlOnTiEsLAxXr15VGAauCm9vbzx9+hQXL14EAOTm5uLnn38WskfF19KzZ0+oqqoK1yKRSEocHq0sMzMz7N27V+7Vtm1bAEXXn5aWhnnz5uHMmTN4/fp1tZyzvP7N93FpOnfuLPe+devWePr0qfCe93/9uf+pZBziJ6rDnj59KqysTU9PBwDo6enJ1WnWrBkAIC0tDQAQEhKCkJAQrFu3DkuXLoW5uTnmzJmD3r17AwBWr16N6OhoTJs2DXZ2dtDW1saJEyewceNG5OTkQEtLC8+fP4eOjo5Cf/451JmamorLly/D1tZWoW7xkOA333wjt13M9OnTMWPGDCxatAiNGzfG9u3bsWrVKhgZGWHy5MkYOXJkudsuJpFI5N6rqakBKApiAMDf3x8qKio4cOAA1q9fDx0dHYwaNQrTpk0TAqfKsre3R8uWLXHo0CE4OzsjNjYWUqlU7gd0amoqIiIi5FYgF9PQ0KjS+Yupq6uXus2Ru7s7Vq1ahZ07d8LPzw/q6urw8vLCZ599hiZNmlTL+cvyb76PS1PSPfnmkDrv/yL14f6nkjFAJaqj7t69i2fPnmHgwIEAIPxDmpycDAMDA6FecnKy3Of6+voIDg5GYWEhrl27ho0bN2L27Nk4duwYmjdvjmPHjmHYsGGYPHmy0MapU6fkzq2vry+3yKJYcUamWOPGjdG1a1fMmjVLoW5xRmfo0KHo1q2bXNtA0RYwCxYswIIFC3D79m3s3LkTS5cuhaWlJVxc
XMrVdnmJxWLMmDEDM2bMQGJiIvbt24ewsDCYmppiwIABFWqrJN7e3vjuu++wcOFCHDlyBLa2tnJb9jRu3Bienp5C8PEmLS2tKp+/PPr374/+/fsjJSUFJ06cQHBwMBo0aCC38Kgm/Nvv48ri/V/k337/U+kYoBLVQTk5OVi+fDnEYjGGDBkCoChToaamhqNHj8plVY4cOQJNTU3Y2NjItaGiogIHBwd89NFHwiKK5s2bIycnR8iwAEUrbg8fPix3rL29PaRSKS5cuAAXFxcAwKtXr3Du3Dm5jEOnTp3w448/wsLCApqamiVei4GBgVwgUhJra2vMnz8fe/fuxV9//QUXF5dytV0ZLVq0wJw5cxAVFYW//vqr1HpqampCBuptvL298c033+CXX37BL7/8gpkzZ8p97u7ujrt378LGxkZucU5t0NHRwZAhQxAbG1vm9VeH+nYfVxbv/3fnXd7/VDYGqERKrrCwEJcvXwYAZGVlCRucP3z4ECtXroSpqSmAon9Yx4wZg23btkEsFsPJyQlnz55FVFQUZsyYAU1NTWRkZMDPzw/9+/eHubk58vLyEBkZCYlEIvzg79SpE6Kjo9G6dWvo6Ohg9+7dCqt5PTw8YGtri48//hhz5syBRCLBli1boK2tLTckOH78eBw8eBCjR4/G2LFjYWxsjJSUFFy5cgUGBgYYP358qdc9fPhw9OrVC5aWllBVVUVMTAzU1NSE+YJVafufAgICYGtrCxsbGzRs2BC//PIL0tLS0LFjx1KPadWqFeLi4nDmzBlIJBKYmpqiadOmJda1sLCAjY0Nli1bhtevXyvMr5w5cyYGDx4MPz8/DB06FM2aNUNycjLOnz8PZ2dnueHQmhAaGoq0tDS4urpCV1cXd+7cwenTpyv0Hb5Nfb2PK4v3/7/r/qeKY4BKpOSys7MxbNgwiEQiaGpqwsTEBO7u7li/fr3C02k++eQTSCQSREdH49tvv4WRkRHmzZsn/EOrrq4OKysrREZGIikpCRoaGrCzs0N4eLgwF2/RokVYsmQJli9fjoYNG2LgwIHo1asXFi5cKJxHJBJhw4YNWLx4MRYvXgyJRIKxY8fi7t27uHv3rlCvadOmiIqKwrp167BmzRqkpaVBV1cX7dq1Q69evcq8bicnJ8TExODRo0dQUVGBlZUVNm3aJFxzVdou6VxHjx7F9u3bUVBQAHNzc3z11Vfo1KlTqcfMmTMHn3/+OWbMmIHMzEwEBwfjgw8+KLW+t7c3Vq9eDVdXV4VMW4sWLRAdHS3Mp8zKyoKenh5cXFxgbW1doWupDHt7e0RERODo0aN49eoVDA0N4efnB39//2o7R329jyuL9/+/6/6nihPJZDJZbXeCiOq+3Nxc9O3bF66urggODq7t7hBVCu9jIuXADCoRVUpUVBQKCwthbm4OqVSK7777DklJSSUudCBSVryPiZQTA1QiqhR1dXVs2bIFjx49AgC0adMGmzdvLnUrFyJlxPuYSDlxiJ+IiIiIlAqfJEVERERESoUBKhEREREpFQaoRFRnnDt3DtbW1iW++vTpo1D/jz/+wNixY9G+fXt06NABI0aMwP379+Xq3L9/H35+fnB0dIS7uzuCgoKQnZ0tV2fbtm0YMGAAnJ2d4ejoCF9fX+zatQslzZAKDw9Hjx49YG9vj0GDBuHcuXPV+yXQv1pKSgoWL16M7t27o127dujXrx8iIiLk7rXHjx9j6tSp8PDwgL29Pbp06YKZM2cq3NtA+e5voOgpWwMGDIC9vT169eqF3bt3K9TZvXs3pkyZgo4dO8La2hrHjh2r3osnegMXSRFRnWFra4uoqCi5slevXmHSpEnw8PCQKz9z5gymTJmCoUOHYurUqcjLy8OVK1fknn4jlUoxbtw4GBsbIzQ0FCkpKQgODkZaWhrWrFkj1MvIyICPjw8sLS2hpqaGs2fPIigoCK9evcLUqVOFeuHh4QgJCcHs2bNhY2OD6OhoTJo0CdHR0e9kP0eq
+4ofNzp79mwYGxvj999/x4oVK1BYWIgJEyYAgLBP6CeffAIDAwM8f/4cmzdvxtixY/HDDz8Ie8GW9/6+dOkSAgIC0L9/f8ybNw9//vkngoKC5J7wBQA//PADAMDT0xMxMTHv7kuh+klGRFSH7du3T2ZlZSW7cuWKUJaXlyfr1q2b7Kuvvirz2M2bN8vatWsne/nypVD2448/yqysrGT37t0r89g5c+bIevfuLbzPycmRdejQQfbll18KZfn5+bK+ffvKPvroo4peFtVDSUlJMisrK9nevXvlyj/88EPZ4MGDyzz2/v37MisrK9mPP/4olJX3/vbz81Nof+HChbLOnTvLCgoKhLLiPz98+FBmZWUlO3r0aMUvkqicOMRPRDVi3rx58PHxwblz5zBgwAA4Ojpi8ODBuHbtWrWe59ChQ2jZsiUcHByEsjNnzuDJkycYNWpUmcfGxsbC3d1dyDgBgJeXF8RiMU6dOlXmsU2bNkVeXp7w/s8//xQyrcVUVVXRr18/nDp1qsTpAFR31cT9XXw/aWtry5VLJJK33j9NmjQBAOTn5wtl5bm/c3NzERcXB29vb7n2fH198eLFC9y4cUMoU1FhyEDvDu82IqoxL168QFBQEPz8/BASEoLs7GxMnz5d+EFcUFCA/Pz8Ml8FBQWltp+cnIy4uDiFZ3VfuXIFTZo0QXx8PLy8vGBjY4O+ffviyJEjcvUSEhIUHrMpFothZmaGhIQEhfPl5+cjMzMTv/76K2JiYjB27Fi5toCiZ5S/ycLCApmZmXj27Fk5vjGqS6r7/m7evDk6d+6MTZs24c6dO3j16hWOHz+O48ePY/To0QrnLywsRF5eHh49eoTly5fDyMgI7733nvB5ee7vBw8eIC8vT+G+bd26tdAGUW3gHFQiqjHp6enYtWsXLC0tARRtij5hwgRcuXIFzs7OGD9+PM6fP19mGyYmJjh58mSJnx05cgQFBQUKAWpycjJev36NBQsWYNasWTA3N8f+/fsxe/Zs6Ovrw9nZGUDRHD2JRKLQrkQiQXp6ulxZYmIievfuLbz39/cXng1f3JZYLIaGhobccY0bNwYApKWlwdDQsMxrpbqlJu7v9evXY/bs2fD19QUAiEQifPLJJxgwYIDCsXPnzsXBgwcBAGZmZti+fbtc9rU893fx//+zXvH7f/53QPSuMEAlohqjr68v/PAGIGRzirOJS5cuRWZmZpltiMXiUj87ePAgbG1tYW5uLldeWFiInJwcfPrpp8IjKzt27Ijbt29j8+bNQoBaGplMBpFIJFdmZGSEvXv3IisrCxcuXMCWLVugoqKCmTNnCnX+eUxxW6V9RnVbdd/fMpkM8+fPx99//401a9bAwMAA58+fR0hICCQSidyCJQCYNWsWxo4di6SkJOzYsQMTJkzAf/7zHxgbG5d5zpLu79LuT963VFsYoBJRjflnVkZNTQ0AhJX0LVq0eOvcutJ+QD548ABXr17F/PnzFT4rzlp27NhRrh03Nzf8/PPPcv2TSqUKx2dkZJQ4NFr8+Es3NzdoampizZo1GDFiBPT09CCRSJCTk4OcnByoq6sLxxW3X9wn+veo7vv7119/xbFjx/DDDz+gTZs2AABXV1dIpVKsWrUKgwYNkpsH2rx5czRv3hwODg7o2rUrevXqha1bt2Lx4sVC/952fxffl//MlBYfV1IGluhdYIBKRLWmKkP8Bw8ehIqKCvr27avw2T+Dy2L/zBxZWFgozLHLzc3FgwcPMGjQoDL7ZWtri4KCAjx+/Bh6enrCORMSEmBjYyPUS0hIgJaWFgwMDMpsj/59Knp/37t3D6qqqgpbkrVt2xYRERFIS0uTW/D0Jk1NTbRq1QqJiYlCWXnubzMzM6ipqeGvv/6S26rt3r17QhtEtYEBKhHVmqoM8R8+fBiurq4lBn5dunRBgwYNcPbsWeEHrEwmw7lz54TMFAB4eHhg48aNSE1NRdOmTQEAx48fR25uLjw9Pcvs18WLFyES
iWBqagoAcHJygra2No4cOSIEqAUFBTh69Cg8PT05VFoPVfT+NjExQUFBAW7evCn3S861a9egqakp3KMlkUqluHPnjtwvbOW5v8ViMTp27IijR4/Kzak+dOgQ9PT05PpB9C4xQCWiWvPPlcPldePGDSQkJAgbl/+Tvr4+Ro4cia+++goymQwtW7bEgQMHcO/ePaxcuVKoN3z4cOzatQsBAQEICAjAy5cvsXLlSvj6+gqBbUZGBiZNmoT3338fLVq0QH5+PuLi4hAZGYlhw4ahWbNmAIp+0Pv7+yMkJAQ6OjrCRv0PHz7E2rVrK3WdVLdV9P729PSEiYkJZs2ahWnTpsHAwABxcXH47rvvMHHiROGXnLCwMGRkZMDJyQk6Ojp4/PgxIiIikJ+fj3Hjxgntlef+BoBp06Zh9OjRWLhwIXx9ffHnn38iOjoay5Ytk5tSEB8fj8ePHyMlJQVA0W4ZAKCjowNXV9dKf09EJWGASkR1zsGDByEWi+Hl5VVqnU8//RRaWlr49ttvkZqaCktLS2zatAm2trZCHYlEgoiICAQFBWHGjBnQ0NCAj48PAgMDhTrq6uowNzfHjh078OzZM2hoaMDMzAxLly5VWFk9ceJEyGQyREZGIjk5GVZWVvj222/5FCkqFy0tLURERCAkJARr165Feno6TE1NMWfOHLnA08bGBjt27MAPP/yArKwsGBgYwMXFBWFhYWjevLlQrzz3NwC0b98eGzZswNq1axETEwNDQ0MsXLhQYVHW7t27ceDAAeH9tm3bABTNk42MjKyJr4TqMZGMu0cTERERkRLhRv1EREREpFQYoBIRERGRUmGASkRERERKhQEqERERESkVBqhEREREpFQYoBIRERGRUmGASkRERERKhQEqEVEN2b9/P6ytrYWXjY0NPDw8MH/+fDx79uyd9KFHjx6YN2+e8P7cuXOwtrbGuXPnKtTOn3/+ibCwMEil0uruIubNm4cePXq8td6YMWMwZsyYSp2jR48emDJlSqWOLavNN79bIqo+fJIUEVENCw4ORqtWrZCdnY0//vgDmzdvxvnz53Hw4EFoamq+077Y2toiKioKrVu3rtBxly5dwvr16zFw4EBIJJIa6h0RUREGqERENczS0hL29vYAgI4dO6KgoAAbNmzAzz//jPfff7/EY16/fo2GDRtWe18aNWoER0fHam+XiKg6cYifiOgdKw4Qnzx5AqBoiLt9+/a4ffs2Jk6ciPbt22P8+PEAgNzcXGzYsAF9+vSBnZ0dOnbsiPnz5yMlJUWuzby8PKxatQqdO3dGu3btMGLECFy9elXh3KUN8V+5cgVTp06Fm5sb7O3t8d577+GLL74AAISFhWHVqlUAgJ49ewpTFt5s48iRIxg2bBgcHR3Rvn17+Pn54caNGwrn379/P7y8vGBnZ4e+ffsiJiamUt9hsfXr12PIkCFwdXWFk5MTBg4ciOjoaJT2FO/jx4/D19cX9vb26NmzJ3bu3KlQ59WrV/jyyy/Ro0cP2NnZoWvXrvjiiy+QlZVVpb4SUfkxg0pE9I4lJiYCAHR0dISyvLw8+Pv7Y/jw4Zg0aRIKCgpQWFiIgIAAXLx4EX5+fnBycsLjx48RFhaGq1evYt++fdDQ0AAALFq0CDExMZg4cSI6d+6Mu3fvYvr06cjMzHxrf06fPg1/f3+0atUK8+bNg5GRER4/fowzZ84AAIYMGYL09HRERkZi/fr10NPTAwBhmsCmTZuwbt06fPDBB/D390deXh7Cw8MxatQoREdHC/X279+P+fPno2fPnpg3bx4yMjKwfv165ObmQkWlcvmSx48fY9iwYTA2NgYAXL58GUFBQXj27BmmT58uV/fmzZtYsWIFpk+fjmbNmuHgwYP44osvkJeXBz8/PwBFmevRo0fj6dOnmDp1KqytrXH37l2Ehobizp072LFjB0QiUaX6SkTlxwCViKiGFRYWIj8/Hzk5Obhw4QI2btwILS0tuYVBeXl5mDZtGgYN
GiSUHT58GKdPn0ZYWBh69+4tlLdp0waDBw/G/v37MXLkSCQkJODAgQMYP3485s6dCwDo3LkzdHV1ERgY+Nb+LVu2DEZGRoiOjoa6urpQXtwXQ0NDGBkZAQDatm0LU1NToU5SUhLCwsIwevRoLFy4UCjv1KkTvLy8sH79eqxbtw6FhYUICQmBra0tvvnmGyHI69ChA7y8vKCvr1+h77RYcHCw8OfCwkK4urpCJpNh586dmDZtmlww+fz5c8TExKBNmzYAAE9PT6SkpGDDhg0YOXIkGjZsiMjISNy+fRvff/+9MC3D3d0dBgYGmDlzJmJjY+Hp6VmpvhJR+XGIn4iohg0dOhS2trZwcnLClClT0KxZM2zZsgXNmjWTq+fl5SX3/pdffoFEIkH37t2Rn58vvNq2bQs9PT2cP38eAIShdl9fX7nj+/btiwYNys5D3L9/Hw8ePMDgwYPlgtPy+u2335Cfn4/+/fvL9VFdXR0uLi5CH+/fv4/nz5/Dx8dHLmg0MTFB+/btK3zeYmfPnsX48ePRoUMHtG3bFra2tggNDUVaWhpevnwpV9fS0lIITov5+Pjg1atXuH79OoCi79zS0hJt27aVu54uXbpAJBIJ10NENYsZVCKiGvbll1/CwsICDRo0gK6ubonZwoYNG6JRo0ZyZS9fvoRUKoWdnV2J7aampgIA0tLSAEAYei/WoEEDNGnSpMy+Fc9lNTAwKM+lKEhOTgYADB48uMTPi4fui/v6z6C8uOzx48cVPvfVq1fh5+cHV1dXLF++HIaGhlBTU8PPP/+MTZs2ITs7W+E8JZ0b+P/v8OXLl0hMTIStrW2J5yy+DiKqWQxQiYhqmIWFhTBcXJqS5jU2bdoUTZo0wdatW0s8RktLCwCEIPTFixdygWZ+fr4QeJWmeB5sZfdlbdq0KQAgNDRUmAdaVr3igPZNJZWVx+HDh9GgQQNs3rxZLvv7888/l1i/rHMXf4dNmzaFuro6VqxYUWIbxddBRDWLASoRkZLq1q0bDh8+jMLCQrRr167Uem5ubgCAgwcPymVbjx49ivz8/DLPYW5uDjMzM+zbtw8TJkyAWCwusV5xeU5Ojlx5ly5d0KBBAzx48EBhisI/z6Onp4dDhw5hwoQJQkD++PFjXLp0qVJzUEUiEVRVVeUWWGVnZ+PHH38ssf7du3dx69YtuWH+Q4cOQUtLS8iYduvWDZs3b0aTJk3QvHnzCveJiKoHA1QiIiXl7e2NgwcPYvLkyRgzZgwcHBygpqaGp0+f4ty5c+jZsyd69eoFCwsLvP/++4iIiECDBg3QqVMn3L17F+Hh4QrTBkqyePFi+Pv7Y+jQoRg/fjyMjIyQlJSE06dP46uvvgIAWFlZAQAiIiIwcOBANGjQAObm5jA1NcXMmTOxbt06PHz4EB4eHpBIJEhOTkZ8fDwaNmyImTNnQkVFBbNmzcLChQsxbdo0DB06FFKpFOvXry9x6L08PD09sX37dnz88ccYNmwY0tLSEB4eXmqQra+vD39/f0yfPh16enr48ccfcebMGQQGBgp7zo4bNw4//fQTRo8ejfHjx8Pa2hqFhYVISkrCb7/9hokTJ5b5ywIRVQ8GqERESkpVVRUbN27Ezp078cMPP+Dbb7+FqqoqDA0N4eLiIgSNAPDFF1+gWbNmOHDgACIjI9G2bVuEhYVhzpw5bz1P165dsWvXLnzzzTcICgpCTk4ODA0N5XYZcHNzw5QpU3DgwAFER0ejsLAQO3fuFMotLCywc+dOHD58GLm5udDT04OdnR1GjBghtDFkyBAAwNatWzF9+nSYmJhgypQpuHDhQqUWH7m7u2PFihXYsmULpk6dCgMDAwwdOhQ6OjpYsGCBQv22bdvigw8+QFhYGP7++2/o6+tj/vz5wp6zAKCpqYndu3fj22+/RVRUFB49egQNDQ0YGRmhU6dOMDExqXA/iajiRLLSdjMmIiIiIqoF3GaKiIiIiJQKA1QiIiIiUioMUImIiIhIqTBA
JSIiIiKlwgCViIiIiJQKA1QiIiIiUioMUImIiIhIqTBAJSIiIiKlwgCViIiIiJQKA1QiIiIiUioMUImIiIhIqTBAJSIiIiKl8n+pujLiYYtS9gAAAABJRU5ErkJggg==", + "application/vnd.jupyter.widget-view+json": { + "model_id": "81053043727a4c1dbe23304e5ad6282a", + "version_major": 2, + "version_minor": 0 + }, "text/plain": [ - "
" + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" ] }, "metadata": {}, "output_type": "display_data" }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, { "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "5d1d3f2835b74004b267d67d04c24663", + "version_major": 2, + "version_minor": 0 + }, "text/plain": [ - "
" + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" ] }, "metadata": {}, "output_type": "display_data" - } - ], - "source": [ - "cc.plot_conf_mat(\n", - " conf_mat_dict={\"Geneformer\": all_metrics[\"conf_matrix\"]},\n", - " output_directory=output_dir,\n", - " output_prefix=output_prefix,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "edf6ffd9-8b84-4d31-8b39-11959140382f", - "metadata": {}, - "outputs": [ + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAA5cAAALvCAYAAADxkzH9AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd1hTZxsG8DtMGSLTrRUVUHGBe1J3pYoTZx11a7WOah211dpqHXXUOmtddSKKW9u66lYEHDgQF+7BVkCZ5/sjH8eEBAgkcALcv+vKRfK+b855AgnJk3fJBEEQQERERERERKQFA6kDICIiIiIiooKPySURERERERFpjcklERERERERaY3JJREREREREWmNySURERERERFpjcklERERERERaY3JJREREREREWmNySURERERERFpjcklERERERERaY3JJRFREda6dWu4uLjAxcUFz54909lxp02bJh7Xz89PZ8cl/TBgwADx73v58mWpw6EspP+dXFxcdHrcovocSE1Nha+vL4YMGYKmTZuiZs2a4u9h2rRpUodHJDkjqQMg0tSAAQPg7++vts7ExATFixeHpaUl7OzsUKNGDdSoUQONGzdGuXLl8jlSIiIiKmySkpIwfPhwXLp0SepQiPQWk0sqFJKSkhAZGYnIyEg8fvwYQUFBAAADAwM0a9YMAwYMgIeHh8RREhVM06ZNw969ewEAv/zyC7p37y5xRESkS4pf3v71119o1KiRxBHpp40bNyollg0bNkTFihVhamoKAKhTp45UoWns2bNnaNOmDQCgXLlyOHnypMQRUWHD5JIKpFq1aqF27dri7bS0NLx79w7v3r3D/fv38fz5c7H87NmzOHv2LLp06YLvv/8exYsXlypsIiIiKqDSv2QDgAULFqBr167SBUOkp5hcUoHk4eGBcePGZVofHh6O/fv3Y8uWLXj16hUAYP/+/bh//z62bdsGMzOz/AqVSK/l1bfW8+fPx/z58/Pk2CS9LVu2SB0Caeju3bt5ctyi9hx4//49Hj16BAAwNjaGl5eXxBER6Scu6EOFkoODA4YNG4YjR47gs88+E8tv3bqFqVOnShgZERERFTRv374Vr9vb28PAgB+hidThK4MKNQsLCyxbtgytWrUSy/75558itbIdERERaSc5OVm8zsSSKHMyQRAEqYMg0oTiggNjx47NclhsRjExMWjdujXi4+MBAE2bNsXGjRuzvI8gCPj7779x7Ngx3LhxA5GRkQAAOzs71KlTB+3atUOHDh0gk8myPX98fDwOHDiAU6dOITQ0FNHR0UhLS4ONjQ1sbGxQvnx5NG3aFM2bN8cnn3yi9hhpaWkICgrC+fPncf36dTx8+FA8TokSJVC5cmU0bdoUvXr1gq2trca/m+TkZOzevRtHjhzB/fv3ER8fDwcHB9S
qVQve3t5o1qwZAPmWFelzWU+cOIHy5ctnedyEhATs27cPZ86cwd27dxEVFQUDAwM4ODigXr168PLyQpMmTTSOMycuXryIgwcPIjg4GC9fvsT79+9haWkJW1tb2Nvbw93dHc2bN0fdunVhbGyc5bGio6Oxd+9enD17Fg8fPkRUVBRMTU1RsmRJNGrUCN27d0etWrWyPMbvv/+OFStWAPj43E1JScGhQ4ewb98+PHjwANHR0bC2tkbt2rXh7e2t9IVIXj7WrP6uinXZyfiazGoRoH/++Qdff/01AMDR0RF///23Rud4/vw52rRpA0EQYGxsjLNnz8LGxkZt2+TkZBw+fBinTp3CzZs3ERUVBUEQYGtri7p166Jjx45o27atRq/frGzYsAELFiwAADRv3hzr16/X6H5XrlzBF198AUD+P+XMmTMwMlKeqRIVFSU+9+7fv4/Y2FgYGBjA1tYWNjY2qFSpEpo1a4bmzZujVKlSWj2OnNJkARh1z4GEhATs3bsXhw4dwuPHj/H27VvY2dmhXr166N+/P+rVq5ftuQVBwIkTJ3D06FHcvHkT4eHhSEpKgpWVFWxsbFCyZEk0aNAAzZs3R82aNbNNBF6+fAk/Pz+cP38eT548QUxMDMzNzVG2bFk0adIEvXr1gqOjY5bH0OfHqrgFScYhsjnZniTj6zir50Bevi7S5dd7jKa/o4YNG6oMFX7+/DlOnz6NgIAAhIaG4uXLl/jw4QMsLS1RsmRJuLu7o1u3bqhbt26O4zp9+jROnDiBoKAghIeHIy4uDmZmZqhQoQJq1aoFDw8PeHh4iL8/Pz8/TJ8+XePjZzacWlefjdQ9f968eQM/Pz8cP34cL1++RFRUFCwsLBAQEKB035cvX2LPnj24ePEiHj16hLdv38LY2Bg2NjawtbWFk5MTmjVrhqZNm+bo8xDpBudcUpFgbW2N7t27i//4L168iJiYGFhbW6ttHxYWhokTJ+L27dsqdQkJCXj69CkOHToEV1dXLFu2DBUrVsz03FevXsX48ePx+vVrlbrXr1/j9evXCAkJwfHjxwHIh+5mfDNNTk5GmzZt1B4DkM8xDQ8Px+XLl7F27VrMnj0bXbp0yTSmdM+fP8fo0aNV3kSePXuGZ8+e4ejRo+jVqxd++OGHbI+l6OjRo5g7dy7Cw8NV6h4/fozHjx/Dz88PrVq1wqJFi3S2yFJ8fDwmT56sdh5hTEwMYmJi8PDhQ/j7+2PNmjX4+eef4e3tnenxtm3bhqVLl+Ldu3dK5UlJSXj37h0ePHiAHTt2oHv37pg9ezZMTEw0ivP169cYP348rl69qlQeHh6OEydO4MSJE+jevTvmzp2b6QdjXT/W/NSqVStYWVnh7du3ePToEYKDg7NN0AHg4MGDSP8+tEWLFpkmlpcvX8bMmTPx5MkTlbrnz5/j+fPnOHz4MOrWrYvly5drlZh16tQJixYtQlpaGi5evIiIiAjY29tne78DBw6I1z09PVVe88ePH8eMGTMQGxurct8XL17gxYsXuHXrFg4fPoxSpUrhzJkzuX4M+eXBgwcYN24cHjx4oFT+6tUrHD58GIcPH8ZXX30lfvGgTkREBMaOHavy2gEgrhh+//59XLhwAb/99hs2btyIpk2bqj1WWloafv/9d6xfvx6JiYlKdbGxsYiNjcWdO3fw119/YdiwYZgwYYLGX0bo22PNb3n1ukgn1XtMTixYsAAbN26Euj6c9P/RoaGh2LlzJzp16oSff/5Zo/Ug7t27h2nTpuHmzZsqde/evcPt27dx+/Zt+Pj4wNPTE0uXLtXJ4wF0+9koo6z+5ynauXMnfvnlF3z48EGpPDk5GQkJCXj+/DmCg4Ph5+cHd3d37NixQ+MYSDeYXFKR8dlnn4nJpSAICAgIQNu2bVXaPXjwAF988QWioqLEMmdnZ1SvXh0ymQy3b99GaGgoAHki2LdvX2zdulXtN9svX77E0KFDxR5TY2Nj1KxZE5988gmKFSuG9+/
f4/nz5wgJCUFcXFymsaelpYmJpbm5OZycnFChQgVYWFggJSUFr1+/xrVr1xAXF4eEhAR8++23MDY2hqenZ6bHjIqKwqBBg/D06VOxzNHREbVq1YKRkRHu3r2LW7duYdeuXbCwsMjqV6tk06ZNmD9/vviGamFhATc3N5QuXRppaWm4f/8+goODIQgCTp06hS+++AI7d+7UySJL3377rVKy9cknn6B69eooUaIEUlJSEBUVhdDQUI164+bNm4fNmzeLt62trVG3bl2ULFkSiYmJuHPnDkJDQyEIAvbs2YM3b97gjz/+yLaXJCEhAcOGDUNoaCjMzMxQr149lClTBvHx8bh8+bL4LbCfnx8cHR0xYsSIPH+smenatStiYmJw8eJFPHz4EADQpEkTVK5cWaWt4urN2TExMUGHDh3g6+sLQJ40appcpstsMY2jR49iypQp4hA2U1NT1KlTB+XLl4eBgQHCwsJw7do1pKSk4Nq1a+jduzd2796t0QdfdUqWLInGjRvjwoULSE1NxeHDhzFo0KAs75OUlIR//vkn08cSHByM8ePHIyUlBQBQrFgx1KlTB+XKlYOJiQni4uLw9OlThIaG4v3797mKO7+9efMGgwcPxps3b2BlZYV69erBwcEB0dHRuHTpkvgFzsqVK1G1alW1/7tSU1MxYsQI3Lp1SyxzdnaGk5MTihcvjqSkJISHhyMkJERt0pHxWBMnTlT6Ozg4OKBOnTqws7NDfHw8bty4gSdPniAlJQVr1qxBVFQUfvrppwL3WLPSv39/AMCxY8fw5s0bAEDbtm3VfuFSpUoVjY+bF6+LdPn9HpP+O4qPj8e+ffvEc2ZcKTbjqKNXr15BEATIZDI4OjrC0dER1tbWMDIyQkxMDO7cuSN+AXbo0CG8e/cOa9euzfILjMuXL2P06NHiZwoAKFu2LGrVqgVra2skJCTg0aNHuHv3LpKTk5W+NKlSpQr69++f7ePIjC4/G2V09epVrFixAsnJybC2tkaDBg1gY2ODyMhI3LlzR2x3/PhxzJo1S7xtaWmJunXronTp0jA0NERcXBzCwsIQGhqqNIyZ8plAVEB88cUXgrOzs+Ds7CwsX748x/dPSEgQatSoIR5j8eLFKm0SExMFLy8vsU3jxo2Fs2fPqrQ7e/as0KhRI7Fdt27dhKSkJJV2P//8s9imX79+wqtXr9TGlpycLFy+fFn45ptvhJSUFLVxTZs2Tbh06ZLa86S3WbdunfgY69evL8TFxWX6+5g0aZIYW506dYSjR4+qtLly5YrQvHlzwcXFRXB1dRXbP336VO0xL1y4IFSrVk1wdnYWXF1dhVWrVgnx8fEq7W7fvi14enqKx5s1a1amcWrq9u3b4vHq1q0r/Pfff5m2ffLkibBq1SrhxIkTaut9fX2VjrV9+3YhMTFRpd3FixeFFi1aiG3/+OMPtcdbvny52KZmzZqCs7OzMHXqVCE6OlqpXUJCgtLfpW7dupn+/nT1WFu1apXt33Xq1Klimz179mR6rpzc5/Lly2J9s2bN1D7vFd26dUts7+7uLnz48EGlTWhoqFCnTh2x3dy5c1V+x4Ig/5307dtXbDds2DCNHlNm/Pz8xGN179492/b//vuv2L59+/Yq9aNHjxbrx40bJ8TExKg9TmJiovDff/8J33//vVbx54bi/+NLly6pbaP4HEh/3i9cuFBISEhQahcdHS0MHDhQbNumTRshLS1N5XjHjh1Tes5cu3Yt0/hCQ0OFRYsWCdevX1dbv2zZMvFYTZo0EQ4fPiykpqaqtDt69KhQr149se3hw4cL3GNNP46zs3Omx9Dk75nT++j6dSEI0r7HPH36VDxeq1atsm2/bt06Yc+ePUJkZGSmba5cuSK0a9dOPO6+ffsybfvixQulzx2tW7cWzpw5o7ZtTEyMsGPHDmH+/PlaPw5B0P1nI0FQfv7UqFFDcHFxEVauXKnSXvG9VzGGOXPmqLy+0sXFxQlHjhwRFi1apNHjI93
ijGQqMszMzJS+jY2IiFBpc/DgQYSEhACQ9zKuW7cOzZs3V2nXvHlz/PHHH+KQnfThaRkFBgaK1+fNm5fp8DsjIyM0bNgQv/76KwwNDVXqTUxM8Msvv6BRo0aZzhE0MTHBsGHDMH78eADyle3279+vtu29e/dw6NAh8fbChQuVVtVNV79+faxbtw6GhobZfguYlpaG2bNnIy0tTXy8o0ePhrm5uUrb6tWrY9OmTbCzswMA7N69W9wyJrcUf9cDBw6Eh4dHpm0rVKiA0aNHo3Xr1ip1cXFx4lwhQ0ND/PHHH+jbt6/aIa+NGzfGxo0bxQ20//zzz2x7kpKSktCpUyfMnz9fZVi2mZkZ5s2bhzJlygCQ93L+999/efZYpdSgQQOULVsWgHw48MWLF7Nsr9hr2aFDB/F3rujnn38Wf/8TJkzAjBkz1A59r1ChAv7880+xJ+bMmTO4fv16bh8K2rdvL/aK3Lx5U9yuIDPZ9cCm/33TX/clSpRQexwTExN4eHhgzpw5uQ093yQlJWHkyJGYMmWKSg+StbU1Fi9eLP6vePr0KW7cuKFyDMXn/fjx47PcsN7JyQmTJ09W26P+7NkzrF27FoC852bLli3w9PRUO+rgs88+w++//y7eXrFihdphjvr6WKWk69eF1O8xOTVs2DB07949yzl/9evXx4YNG8T/Z1u3bs207eLFixEdHQ0AKFeuHHx8fNCiRQu1bUuUKIE+ffrobHV8XX82yiglJQXjx4/HmDFjVD7jpL/3xsfHizGUKVMGM2fOzLQ32sLCAh07dsTkyZM1f5CkM0wuqUhRnHehuKx4Oh8fH/F6nz59ULNmzUyPlb7wSjp14/oVh67k16TyHj16iNcz+8Du5+cnXm/QoAHat2+f6fGqVaumtIhDZk6ePImwsDAA8qGT2e0B5uDggMGDBwOQz5U4evRotufIiuKwYm1+13v27BGfG926dUODBg2ybF+lShVxWFFMTAzOnj2bZXtjY2NMmzYt03pTU1N8/vnn4u3g4GCVNrp6rFKSyWTo1KmTeFvxg2VGaWlpSl+GqHtuhYSE4NKlSwCASpUqZTqcOJ25uTm++uorjc6fHQsLC6XkPatjvXv3TukLA3WPJf3/hpmZWY6GpOszW1tbpd93Rvb29kpfkmT3vM9svq0m/vrrL6SmpgKQJwDZDfds0qSJ+EH6wYMHauebKdKnxyolXb8upH6PySvly5cXF0MKDg5WO0Xm9evXSvHPnj0710P5c0PXn40yKlWqFIYPH55lG8Xfi7W1tdaLsVHeYXJJRYriN5yKiV/6bcUJ8opJWmYU/4EGBwcjISFBqT69BwqQLw6jC2lpabhx4wZ27dqF5cuXY968eZgzZ454WblypdhWca6CIsWtWBQ/4Gemc+fO2bZRXFBEMTnKSuPGjcXrit/U54bi73rfvn0qfwtN5fXjSJ9/lZUaNWqI19XNmdTVY5Wa4qJTx44dU1mgId3ly5fF+WClS5dGw4YNVdqcPn1avP7ZZ5+pHQGQkS6ff4ofdLP6EP3333+L86Dc3NxQoUIFlTbpf9/Y2Fitkl590qpVK7W9zYpy8rz38fER56TmlOJzJS9e4/r0WKWmy9eF1O8x2njx4gX+/vtvrFmzBgsXLsRPP/2k9L797NkzAPL1INJ75xRduHBBfA5UqlQJLVu2zLfY8+KzUUbt27fPdPGmdLa2tihWrBgAIDQ0FFeuXMk2DpIGF/ShIkUxobS0tFSqCwkJEb/NNjc312j58erVq8Pc3BwJCQlITU1FSEgI3N3dxXpPT0+x93Dp0qU4f/48OnfujKZNm2a7lUdGKSkp2Lp1KzZu3Kjx8J70ITSK0tLSxEn3gGYLsbi6ukImk2U5HExxRcP//vsv08RWkeIqrC9fvsy2fVY8PDzEv8Xt27fx2WefoXv37vj000/h6uqa7ZYj6RQfx759+8RVfLOi+PfI7nE4OztnezzFoZwZV6oFdPdYpVa1alVUr14dd+7
cQXx8PE6cOKH2Q6PiCpKdOnVSO3xR8e927do1jYaKKj6ftX3+NW/eHLa2toiKisKTJ09w7do1tdsLKD6WzHpePD09sWbNGgDAlClTcPjwYXh6eqJx48YoWbKkVnFKRRfP+w4dOuD3339HWloazpw5g88//xzdu3dHy5Yt4eLiotHeg9HR0WLvFyBfHEaTHpD79++L1/PjNa6Lx6oPdPm6kPo9JjeuXr2KxYsXIyAgINvh1OnUvW9fu3ZNvK7uy7W8lBefjTLKqic0nbGxMdq2bYtDhw4hNTUVX375JTp06IAOHTqICwCRfmBySUWK4rCKjPOYFP+hlylTRqM3bwMDA5QuXVpcSTPjm0LPnj1x7tw5cQU8f39/cV+nkiVLon79+mjcuDHatWuX5fDGpKQkjB49GufOncs2JkUZe2cB+e9Acf5k6dKlsz2OhYUFihcvrnYocbr0niUAGiVkGWU89ubNm/H48eMs76O4RYq1tTV++eUXTJ48GcnJyXj9+jVWr16N1atXo1ixYqhduzYaNGiA1q1bZ/pGFh8fr/Q7y2zOak4eR0aaLImv+A2uuh4LXTxWfeHl5SV+SDx48KBKcpmYmIh///1Xqb06is+/S5cuiUNkNZXd3y07RkZG8PT0FOdMHTx4UOVD9KtXr8T92oyNjdGxY0e1xxo1ahT8/f0RFBQkrnh56tQpAPIhdPXr10eTJk3Qtm1blS/JciosLAx//fVXlm3S98vThi6e91WqVMHUqVPFlULDwsKwZMkSLFmyRFw1slGjRmjbtq3aFY0BqKysun379hw+kvx5jeviseoDXb4udP0ek9d2796NmTNnapxUplP3vp2+ijgAtb26eSkvPhtlpGliOGPGDNy+fRsPHz5EcnIyDh06hEOHDkEmk6Fy5cqoX78+mjVrhk8//TTb0QOUd5hcUpGRkJCg1MOUcb6C4j/0nCxZrtg245uCgYEBfvvtN+zbtw8bN25U2k/yzZs3OHLkCI4cOYI5c+aga9eumDJlitoFSFasWCEmlgYGBvD09ES7du3g5OSEUqVKwdTUVKm3Kv2bRXVvahmHp6QPM9HkcWb15pzVViqaSP9mNN3x48fFRDwzGfff/Oyzz+Do6IhVq1bhxIkTYhL94cMHMbFfuXIlXF1dMX36dJX5lNo+BnWPIyNdzRPR9rHqC8X98M6dO4fo6GilDxonTpwQ/y4uLi6Zfmuu6+dfbnh5eYkfoo8cOYLp06crJREHDx4UFyPJap9OMzMz/PXXX9i2bRu2bdumtF9n+h60+/btg5mZGfr27Yvx48dr/DrO6PXr19kO2bexsdE6udTV837w4MGoWbMmVq9ejQsXLoi/z7i4OJw7dw7nzp3D4sWL0bhxY8yYMUPl+aKulzCn8us1ru1j1Re6el3ow2tcUw8ePMCsWbPE92AXFxd4e3ujbt26KFeuHCwsLJSSn2nTpmHv3r0AIP4uFCl+tlC3gFFeyovPRhlp+v/Lzs4Ou3fvxsaNG+Hj4yN+4SAIAh48eIAHDx7Ax8cHJUqUwLBhwzB06FCNpkiQbjG5pCLj5s2bSm8uGb89VVw4Iyd7xym2Vbf4hkwmQ7du3dCtWzc8efIE/v7+CAgIQEBAgLjHZEpKCnbv3g1/f3/4+Pgo9WImJSWJ+3MC8o2Zs1rIILs34IxvDh8+fNDozSq734mZmZn4wW3//v2oVq1atsfMCy4uLvjtt98QFxeHgIAAXLlyBYGBgbh586aYgN26dQsDBw7EkiVLlL4lz/i7CQgIkGTzbU1p81j1heJ+eOmLbvTr10+s12RvS0D5b7dy5Uq1e9jmtTp16qBSpUoICwtDVFQUzp8/r5SUafpYAHkPzuDBgzF48GBxflFgYCACAgLEPW/fv3+PDRs2IDAwEH/99VeuE8yCpn79+li/fj2ioqJw5coVBAQEIDAwEHfu3BE/mF+6dAm9evXChg0bUK9ePfG+iv/rrKys9H7eljaPVV/o6nWhL+8xmti
0aZPYI+3h4YGVK1dmOV0hu+RL8bNFfs+xz6vPRtrEM3bsWIwZMwa3bt0SP08FBgaKPaSxsbFYvHgxrl27hpUrV3Lxn3zG5JKKDMWV1gwMDFTG/yt+W/ry5Utx8+OspKWlKfWGZje0o2LFiqhYsSJ69uwJQD4kzcfHB3/99RdSUlLw5MkTrFixQqlH7saNG+KbibOzc7YfSl+8eJFlffHixWFsbCwmH69evcp2xdH4+Phsv/G3s7MT24SFhWn9xq+YUOeGpaUlPv30U3z66acA5En3sWPH8Pvvv+P58+dIS0vDjz/+iFatWokfyq2srGBiYoKkpCTxcdSqVUurOPJDbh6rPvHy8sKFCxcAyOdepSeXiqvvGhgYZLmwlOJIhOyGU+elzp07i1tXHDhwQPwQHRoaKo5csLS0zNHWMM7OznB2dhY3c79z5w62bt2K3bt3AwCuX7+Obdu2YejQoTmOt1GjRkojKgoSW1tbcc4VAERFReHw4cNYuXIloqOj8eHDB8yaNUtppeH0rSkA+TDJqKioArHicm4eqz7RxetC1+8xeUlxpfbx48dnOw9e3aJOihSft+mL/+SX/PhslBsGBgaoVasWatWqhS+//BJpaWm4evUqNmzYIA6bPnHiBP755x+1W61R3ikYM8KJtBQVFYV9+/aJt5s3bw4rKyulNtWqVROHTyQkJGj0gSskJERM/AwNDXP8ZlepUiVMnToVX3/9tVh28uRJpTaK80ycnJyyPWZ238QbGBgoLTihbo+1jG7dupXtvBHFfdhyOjc0P1haWqJbt27YvHmzuG9WdHS00iIRgPICR/r4ODSh6WPVVF5/66u4H97Vq1fFHv2jR4+KX4I0bNgw031iAf35uyl++XPy5Enx/4PigiWZ7dOpqerVq2Pu3Lno3bu30rmKOltbWwwYMACrV68Wy+7duyc+nwB5T3n6/qpAwX2Na/JY9YkuXhf6/h6jKCfv2+/evVNaZE8dxZFWiqu950ZO/59L8dkoNwwMDFCvXj2sWLFCaQ9O/m/Mf0wuqdATBAHTpk1TGkoyatQolXYWFhZKi5+kz3/IiuJ+kbVr1871XIj0HicAiIiIUKpTfCPIbkhKWloadu3ale35FFeb0+Sbbk22Q1B8DIcOHVJagECfVKhQQenNPmOcio9j586d4vL4BVF2j1VT6QkqoH7xEW1l3A8v/TmpyQqS6Vq1aiVev3jxomS9cRUrVhQ/CCYkJOD48eMQBEFpI/HsHoumsvq/UZS5ubkpzV3P+LtRHJK5efPmHC+4ok+ye6yaUkzq8uI1rovXRUF5jwGgtOhNdu/bvr6+SovsqdO0aVNxnmpYWFi2+ylnRfFvnd15AWk/G+WGTCZTeq7o8/OksGJySYVafHw8Jk6cqLSvWefOnTOdl6LYE7Bt2za1+02lu337Nnbu3Cne7tOnj0qbqKgojeJUXCI94xAtxZXhrly5kuXw1D///DPLmNN1795d6ZjHjh3LtG1ISIjSG0VmOnTogE8++QSA/M10ypQpGr1xAfK/k7bzSDT9XaekpCh9q5zx992nTx+xV/vVq1eYPXu2xh8+o6Ki8mXRCF09Vk0pfnhNn++naxn3w3v27JnY02pqaioOB8xM7dq1xS9NBEHAlClTNF4AJCkpCbGxsbmMXJXiYzlw4ACuXLkiDlfPbJ9OxViym3+VLqv/G4WRps/72NhYpd9hxt/NkCFDxJ6YmzdvYsWKFRrHkHG12byiq8eqqfx+jef0dQFI/x6TE4rv2ydOnMi0XVhYmEbPv1KlSinNmZ81a1auv0iwsrISk9+oqCiNfoe6/myUG3FxceKUlewoTg/iFiX5j8klFUrh4eFYv349Pv/8c6W5lnXq1MHcuXMzvV/nzp3F4RvJyckYNmyY2i0NLl68iGHDhon/lF1dXdXuz9eqVSt8//33uHz5cqZJR8Y9+TKuylijRg1xOOC7d+8wfvx4lTf/pKQk/Pbbb1i8eLFG3xA6OzsrvVF
9++23+Pvvv1XaBQYGYvjw4UhNTc12zoihoSFmz54tfmg7f/48vvjiiyyH3YaEhGDx4sVo1aqV1vNIFi1ahH79+mHv3r2ZJgpRUVGYMWOG+AHR0tJSZe5t8eLFMX36dPG2n58fRo0ahQcPHqg9piAIuHr1KubMmYPWrVvjw4cPWj0OTejqsWpKcRXK48ePa/wGnxPp++EB8pUWFyxYICb1rVu31mjLje+//158/t+9exc9e/YU53Kq8/jxY6xevRpt2rRBUFCQDh6FnKenp/h6uXjxIjZu3CjWZbZPZ7o3b97g008/xfz58zN97QiCgDNnzmD58uVimbaruRYEEyZMwIgRI3D06NFME4UXL15g0qRJ4v/mSpUqiQlJuooVK2L06NHi7RUrVmDatGmZ7h+cmpqKCxcu4Ntvv0W3bt109GiypqvHqinFqRL//PNPnvTmavO6AKR/j8kJxZ6z+fPnq+1pvHjxIgYMGID4+HiN3re/+eYb8UuA58+fo3fv3pn2YL59+xY+Pj5YuHChSp2JiQkqVaoEQP4FZFZfLqfT9Wej3Lh16xZat26N5cuX4969e2rbpKam4sCBA+LqxEDR+N+ob7igDxVIp0+fVto3KS0tDXFxcXj37h3u37+v9k2kR48e+O6777Kc02FiYoIlS5bgiy++QFRUFMLDwzFo0CBUq1YN1atXByBfTEPxWzs7OzssXrxYbfL14cMH7Nq1C7t27YKFhQWqV6+OMmXKwNzcHNHR0Xj48KHS5ty2trYYO3as0jEMDAwwfvx4zJgxA4D8DfWzzz6Dm5sbypYti5iYGPj7+4tJxpw5czB58uRsf4fff/89bty4gefPnyMhIQHjx4+Ho6MjateuDUNDQ4SGhuLmzZsAgC+//BL//vuvuOhAZh8CmjZtitmzZ2P27NlITU3FtWvX4O3tjUqVKqF69eqwsrJCYmIiIiIicOfOHZ0OVxEEAYGBgQgMDIShoSEcHR1RpUoVlChRAh8+fMCrV69w9epVpW9pp06dqnaBm+7du+Pp06dYtWoVAPmG3adPn4aTkxOcnZ1hYWGB9+/f4/Xr17hz506+75+my8eqiZYtW8LMzAzv379HSEgIPD090bBhQ1hZWYnDtps1a6Y0zyWnMu6Hp8nelhk5OztjyZIlmDhxIt6/f49Hjx7hyy+/RJkyZVCrVi3Y2NggOTkZUVFRCAkJyTSZ0JaNjQ2aN2+OU6dOISUlRWnOjyaP5e3bt9i4cSM2btwIa2trVK9eHaVKlYKJiQkiIyNx9+5dpf9xlSpVwsCBA/PksegTQRBw+vRpnD59GsbGxnByckKlSpVQvHhxxMfH4/nz57h+/bq4iqqhoSG+++47tccaO3Ysnj9/Lg7x27t3Lw4cOIDq1aujcuXKMDc3R1xcHF68eKE0f0zdVlH6/lg10b59eyxZsgSCIOC///6Dl5cX3NzclFb59PT01GqBM21fF4C07zE5MXjwYOzevRtRUVGIjY3FsGHD4OrqiipVqkAmk+H27dtigtS8eXPY2dllu69ymTJlsGzZMowZMwYJCQl49uwZhg0bhnLlyqFWrVooUaIEEhISEBYWhpCQECQnJ6NNmzZqj9W+fXusWbMGgPzL5X379qFixYpKn2OmTp0qXtf1Z6PcCg8Px8qVK7Fy5UrY29ujevXqcHBwgKGhISIiInDz5k2l0QX169fXWXJLmmNySQVScHAwgoODs21nYGCAFi1aYNCgQWjWrJlGx65SpQq2b9+OSZMm4fbt2wDk336qGwbi6uqKZcuWoWLFimqPZW5uLn4oiY+PFzeKVqdatWpYsmSJ2kVLevTogSdPnohvBgkJCTh//rxSG1NTU0yfPh2dO3fWKLm0s7PD5s2bMWbMGHExgUePHuHRo0dK7by9vfHNN98ozY3JalnxXr16oWLFipg1axbCwsIAyIf+pF9Xx8nJCSVKlMg25qwoxpSamor79+8rJe4Z206bNg29evXK9Hjjx4+Hk5M
TfvnlF7x58waCICA0NDTLhRdq166t0zfSzOj6sWbH0tISM2bMwKxZs5CWloanT5+qLBxibm6uVXIJKO+Hl87a2hotWrTQ+BitWrXCzp07MWPGDNy6dQuAfPio4hDSjMqVK4fSpUvnLuhMeHl54dSpU0plWe3Tmc7Y2FhpxeKYmBillSczatiwIZYsWZLve99JQfF5n5ycjNu3b4v/ozOys7PDnDlz0LJlS7X1MpkM8+fPR82aNbF8+XLExsYiNTUVN2/eFL9UU3ef3Pb+55QuH6smKlWqhFGjRokLBKn7X+fk5KT16tm5fV0okuo9Jifs7OywatUqjB49Wvwi/NatW+L/pHRt27bF/PnzsxxRpahJkybYsWMHpk6dKn4mef78eaarzWb2f2H48OE4fvw47t+/j+TkZKWpQ+kUk0tAt5+NcqNYsWIwMjIS5wRHRERkOfe0Q4cOmDdvXrY94qR7TC6pUDA2NoalpSWKFy8OOzs71KhRAzVr1kSTJk1QpkyZHB/P0dERe/bswd9//41///0XN27cEOfA2Nraok6dOuKy8FmtvHb58mUEBATA398fwcHBePz4MSIjI5GYmIhixYqhdOnScHV1RYcOHdC6dess/wlOnDgRLVq0wLZt2xAYGIioqChYWFigdOnSaNGiBXr27CkOddFUhQoV4OfnB19fXxw+fBj3799HQkICHBwcUKtWLfTq1UtMytN752QyWbZDFBs3boyjR4/i2LFj+O+//3D9+nVEREQgLi4OxYoVg729PSpXrgw3Nze0bNlS/OZTG99//z369euHCxcu4Nq1a7h//z5evnyJ+Ph4GBoawtraGk5OTmjWrBm6dOmitLR7Zjw9PdG2bVscPnwY586dQ3BwMKKiopCQkAAzMzOUKlUKVapUQb169eDh4QFHR0etH4cm8uKxZqdXr15wcnLCzp07ce3aNbx58wbv37/X6fA5xf3w0ikOpdNUtWrV4Ofnh3PnzuH48eMICgrCmzdv8O7dO5iYmMDGxgaOjo6oU6cOmjdvDjc3N52viNumTRtYWloqzfvUpHemVKlSuHz5Mi5duoSAgADcunULT548EedGWVhYoGzZsqhVqxY8PT3RtGlTncatz1avXo3bt2/j4sWLuHHjBh48eIBXr17h/fv3MDExga2tLZydneHh4YHOnTtrNJT6iy++QNeuXbF//35cuHABISEhiIqKQlJSEiwsLFCqVCk4OTmhYcOG8PDwyNX7SW7kxWPNzoQJE+Du7o49e/bg5s2biIyMzNG+hprI7esiIyneY3LKzc0Nhw8fxubNm3Hq1CnxCzkHBwe4urrCy8srR1sSpatWrRr27duH48eP4/jx47h27RoiIiLw/v17WFpaonz58qhduzZatWqV6RdzlpaW8PX1xY4dO3Dq1Ck8ePAA7969y3b+pa4+G+VGnTp1cOHCBVy4cEHc6/XJkyeIiYlBWloaLC0tUaFCBdStWxdeXl5KK4hT/pIJBXmZNCLKF2FhYeKCKo6OjmrnZxIRERFR0ca+YiLKluKiSNoOiyIiIiKiwonJJRFl6fnz59iwYYN4u1OnThJGQ0RERET6isklURE2YcIE/PPPP5luLXH27Fn069dPnG/p7Oyco8VViIiIiKjo4JxLoiKsdevWeP78OSwsLODq6opy5crBxMQEMTExCA4OVtqI2NzcHNu3b5dkYQQiIiIi0n9cLZaIEB8fD39//0zrK1SogGXLljGxJCIiIqJMseeSqAh7+vQpjh07hoCAADx9+hTR0dGIiYmBsbExbG1t4erqilatWqFTp075sn8jERERERVcTC4LoPr16yMpKQkODg5Sh0JERERERBIKDw+HiYkJAgICpA6Fw2ILosTERKSmpkodBhERERERSSwlJQX60l/I5LIAKlmyJADgxIkTEkdCRERERERSatOmjdQhiLgVCREREREREWmNySURERERERFpjcklERERERERaY3JJREREREREWmNySU
RERERERFpjcklERERERERaY3JJREREREREWmNySURERERERFpjcklERERERERaY3JJREREREREWmNySURERERERFpjcklERERERERaY3JJREREREREWmNySURERERERFpjcklERERERERaY3JJREREREREWmNySURERERERFpzUjqAHQtNTUVDx48wM2bN3Hr1i3cvHkTISEh+PDhAwCgW7dumD9/fp6c+/Lly9izZw8CAwMRHh6OYsWKoVy5cmjdujV69+6NkiVL5sl5iYiIiIiIpFbokssJEybg33//zddzpqSkYNasWdi9e7dSeWJiImJjY3H79m1s2bIF8+bNQ9u2bfM1NiIiIiIiovxQ6JLL1NRUpdvW1tawtrZGWFhYnp3z+++/h5+fHwCgePHi6NmzJ2rUqIH379/j5MmT+O+//xAbG4sJEybgzz//ROPGjfMsFiIiIiIiIikUuuSydu3aqFKlClxdXeHq6ooKFSrAz88P06dPz5PznT59WkwsHRwcsHXrVlSqVEms7927N7Zs2YKff/4ZycnJ+O6773D06FGYmJjkSTxERERERERSKHTJ5ahRo/L1fMuXLxev//DDD0qJZboBAwbg/PnzOHXqFJ49ewY/Pz/06dMnH6MkIiIiIiLKW1wtVgtPnz7FzZs3AQDly5dHu3btMm07ePBg8frhw4fzOjQiIiIiIqJ8xeRSC2fOnBGvt2jRAjKZLNO29evXh7m5OQAgICAA8fHxeR4fERERERFRfmFyqYXQ0FDxeq1atbJsa2RkhBo1agAA0tLS8ODBgzyNjYiIiIiIKD8VujmX+enRo0fi9fLly2fbvnz58ggICBDvW7t27TyLjYiIiIj0iCAAKSlAcrL8kpqqeklLU74uCPKfGS+Z3UexTPGS8TiCIL+kx5XxZ3aXzO6n6f01Oa7i703d9axuZ/azsIqOBmxspI4CAJNLrbx79068bqPBH9Ta2lrtfYmIiIgoBxITgbg4+c+kpMwvycmqPxWvJyZ+vCQlKd9OTv6YDKakfLxkdVvxPoqX9PMR6VgigHeVKqE4k8uCLyEhQbxuamqabftixYqJ1znnkoiIiAolQfiY/L17J/+Z2SUhIesELz5efoz0S/oxmagRAQBMARTLtlX+YXKpI1kt5kNERESk1wRBnujFxgIxMco/FZO69J/qyhR/pqRI/YiIigxjqQNQwORSC+mrvwLAhw8fsm2v2MbCwiJPYiIiIqIiJi1NntQpJoSK19++lV8UryuWpV+YEOovAwNAJpP/TL8YGn78mfGiWJ/xkn4cQH49s5/ZXXJ6/9wcN53i7azqsoupsAoJkToCEZNLLRQvXly8Hh0dnW37mJgYtfclIiKiIkwQ5L19mSWHMTHyBTsyu7x9W/gXLNGUiQlgbKz8M/26Ynn6dVNT5YtiWXpbIyP5RfG6urKM9erOq1imLilUlxgW5qSINPbgwQMsXLgQK1asgLFxhr7KNm2kCUoNJpdacHR0xOXLlwEAz549Q6NGjbJs/+zZM6X7EhERUSGTkiJP+CIjgago+c/0S0SE+ktUlLz3sTCzsACKFwcsLZUv5uaZJ3empvL7WVrK75t+Sb9taQmYmX1MII2MmIhRoeTn54cvv/wSb9++RfHixfHrr79KHVKmmFxqwdnZWbweHByMHj16ZNo2JSUFt2/fBgAYGBigatWqeR4fERER6YAgyBPAFy8yv6QniQqjlAo0CwvA2hooUUI1qcuY4CmWZSy3sJBfDLi1OlFOJSUlYerUqVi2bJlYtnjxYrRo0QJdunSRLrAsMLnUQsuWLcXrZ8+ehSAImS7sExAQIK4uW79+faX5mkRERJSP3r8HwsOBN28+/kzvRVTsZVTsdSwoq5PKZICVlTwpTL9YW8vLFC8lSsh/Fi/+MYlM/2llJR+2SUSSefLkCXr16iWOklS0YMECeHl56eWCokwutVChQgXUqlULwcHBePbsGY4dO4b27durbbtp0ybx+ue
ff55PERIRERUBKSnyJDE9UczqEh4un9+oz4oXV00ObWwyv1hbK/cyspeQqEA7fPgwBg4ciKioKJW6fv36Ye3atXqZWAJMLjN1+fJlDBw4EABQrlw5nDx5Um27cePGYcSIEQCAOXPmwMXFBZ988olSm61bt+LUqVMAgPLly6N79+55GDkREVEhkJT0cc5iei/iy5fqL+Hh+jln0cICsLUF7OwAe/vML3Z2yr2GhoZSR05EEkhJScHMmTOxYMEClTpTU1MsX74cw4cP19vEEiiEyeXTp0+xe/dupbK7d++K12/fvo2lS5cq1bu6umba45gdDw8PdO/eHX5+fggPD0ePHj3g7e2NGjVq4P379zh58qSYWBobG2Pu3LkwMTHJ1bmIiIgKHEGQz0N8/frjIjeZXRSHob57J3Xk6pUsCZQtq3wpU0Zebmcnv9jayi/F9GlrcyLSZ8+fP0ffvn1x9uxZlboqVarA19cXbm5uEkSWM4UuuXzx4gXWrFmTaf3du3eVkk0A6NatW66TSwD46aefIJPJsGfPHrx79w4bNmxQaVOiRAnMmzcPjRs3zvV5iIiIJJeUpLoaavrP9GGpr19/vLx5o7/zFY2M5Emhg4P8kt7DmJ4kKt52cJAnkfyCmIh07NixY+jfvz/Cw8NV6nr06IH169ejRIkSEkSWc4UuuZSCkZER5s2bhy5dumD37t0ICgpCeHg4TE1NUa5cObRu3Rp9+vRByZIlpQ6ViIhIVfpqqC9fylc+zWz46Zs3+tujCMgXs7GzkyeMGS8ODkCpUvKf6WUlSnDrCiKSTGpqKubMmYOffvoJQoa9ao2NjbF48WKMHTtWr4fBZiQTMj4S0ntt/r9R6okTJySOhIiI9JYgAG/fKu+nGB6unEAq/kxKkjrizJUoIU8aS5WS9x6qu5QuLe9lNOL35kRUMMycORNz585VKf/kk0+wa9cuNGzYUKPj6FNuwP/AREREBUVa2seFbV69+nhRvK2YTKakSB2xMkvLj3MSbWw+zk1Mv51xKKqdnbyc22IQUSE0fvx4bNy4ES9evBDLOnfujE2bNsHW1lbCyHKPySUREZGU0he8UUwWX79W//PNGyA1VeqIPype/GOymJ4Mlir18VKypPJ1LnBDRCRycHDAzp070apVKwDA/Pnz8c033xSoYbAZMbkkIiLKC3FxHxPDjElixos+DUm1t89+6Gl6byN7FImItNKiRQusWLECtWrVQrNmzaQOR2tMLomIiHLi/Xv5HMXnzz/+TL+8eCG/vHoFJCRIHelHpqbyhWzs7eUJYvr2GYpbaZQtK+9h5GqoREQ6denSJbi4uMDGxkZt/ahRo/I5orzD5JKIiEgQ5NtrpCeHWc1pjI2VOlo5Bwd5opjeo1i69MchqPb2yhdzc66KSkSUzwRBwNKlSzF16lR4enpi3759BXrIqyaYXBIRUeGWkKDcu6jYw6h4SUyUOlL54jWlSn1MFDP7WbIkh6QSEemx6OhofPnll9i/fz8A4MCBA1iyZAm++eYbiSPLW0wuiYio4BIE+SI3YWHAo0cfL0+efEwko6OljdHcXDkxTN9OI723UbHXkQveEBEVeAEBAfD29kZYWJhS+dSpU9GmTRvUrVtXkrjyA5NLIiLSX2lp8qGoT54Ajx/LL0+efEwmw8Kkm9toYQGUK6d8KVtW/lMxYbS0lCY+IiLKV4IgYNWqVZg0aRKSMizUJpPJMH36dNSqVUui6PIHk0siIpJObCzw9OnHy5MnH68/fiz/mZycvzGVLKk6lzFjD2PZsoCVFecxEhERAODt27cYPnw4du3apVJnb2+PrVu3okOHDhJElr+YXBIRUd4RBCA8HAgNlV/u3pX/vHdPnki+e5d/sRQvDpQvr9zDmL5aavp1rpZKREQ5dP36dXh7e+PevXsqdc2aNcPOnTtRvnx5CSLLf0wuiYhIe0lJwIMHwJ07QEiI/JKeSMbE5P35jYyATz4BHB2BSpXkl/REMv1n8eJ
5HwcRERUZgiDgzz//xLhx45CoZlG4KVOmYO7cuTAuQguwMbkkIiLNKPZC3rv3MYkMCZEnlqmpeXduExOgYkX55ZNP5D8dHT8mk+XKAYaGeXd+IiIiBXFxcRg9ejS2bt2qUmdjY4PNmzejc+fOEkQmLSaXRESkLC5Ouefx3j35JTQUePs2b85ZsqQ8YaxQ4eMlPZH85BN5vYFB3pybiIgoB548eYLPPvsMd+7cUalr0KABdu3ahUqVKuV/YHqAySURUVEVEyMfxnr79seft2/LF9LRNRMTwMkJcHYGXFzk1ytXlieR5csDpqa6PycREVEeKFmyJIqp2Trq66+/xqJFi2BShOfuM7kkIioKXr0CgoLkl8BA+c8nT3R/njJlgOrVgWrV5Emki4s8oaxYkcNWiYioUChWrBh8fX3h7u6Ot2/fonjx4tiwYQN69uwpdWiSY3JJRFSYpKUBDx8CN24A169/TCZfvtTdOUxNgSpV5EljtWrKyWSJEro7DxERkZ6qUqUKNmzYgLlz52LXrl2oWrWq1CHpBSaXREQFVUwMEBwsTyTTL8HBQHy89seWyeTDVp2d5Zf0Ia1OTvKhrOyFJCKiIiAhIQHm5uZq63r06IEuXbrAyIgpVTr+JoiICorwcOD0aeC//+SXW7e0P6aRkTxhrF4dqFHj48XZGTAz0/74REREBVBiYiK++eYbXLhwARcuXFA7xxIAE8sM+NsgItJXuk4mLSwANzfA3f3jxcVFvtgOERERAQAePXoEb29vBAYGAgAmTpyI1atXSxxVwcDkkohIX8TEAGfOACdPyi/Bwbk/lr09ULu2PJmsV0+eSFatyuGsREREWdi3bx8GDx6M2NhYsWzNmjVo0aIF+vXrJ2FkBQOTSyIiqSQkAOfPAydOyJPJwED5gjw5YWwsH8Zau7bypVQp+bxJIiIiylZycjKmTZuGJUuWqNQVK1YMqampEkRV8DC5JCLKL2lpwLVrwL//yi/nzwNJSTk7hoUF0Lw58Omn8ou7O4e1EhERaeHJkyfo3bs3Ll26pFLn4uICX19f1KpVS4LICh4ml0REeenZM+DYMXkyefw4EBGRs/tnTCbr1ZP3VhIREZHWjhw5ggEDBiAqKkqlrk+fPvjjjz9QvHhxCSIrmJhcEhHpkiDItwTZswfYuxe4eTNn9zc1BZo2BVq3ll8aNGAySUREpGMpKSn44Ycf8Msvv6jUmZiY4LfffsPIkSMh4xSTHGFySUSkrbQ0wN8f8POTJ5UPH2p+X0NDoGHDj8lkkybcAoSIiCgPvXjxAn379sWZM2dU6ipXrgxfX1+4u7tLEFnBx+SSiCg30tKAc+cAX195D+Xz55rf19kZaNcOaN9ePtTVyirPwiQiIqKPTpw4gX79+uHNmzcqdd27d8eGDRtQokQJCSIrHJhcEhHlxJ07wNatwLZtwOPHmt3HxgZo21aeULZrB1SqlKchEhERkXqrVq1SSSyNjIzw66+/4uuvv+YwWC0xuSQiys7r18DOncCWLfLtQjRRqRLQowfQrRvQuDH3lyQiItIDf/75J4KCghAWFgYAqFixInx8fNC4cWNpAyskmFwSEamTmgrs2wesXy9f6VWT/a2qVwe6d5cnlXXrcp9JIiIiPWNjYwNfX180a9YM7du3x+bNm2Frayt1WIUGk0siIkUJCcDGjcCSJZotzOPqCvTpI08oq1fP+/iIiIhIK/Xr18fFixdRt25dGBgYSB1OocLkkogIkA99XbkSWLUKiIzMum3p0kC/fsCAAUCdOuyhJCIi0iORkZGYMmUK5s+fj5IlS6ptw9Vg8waTSyIq2u7elfdSbt4MJCZm3s7CQj7k9YsvgDZtOIeSiIhID12+fBm9evXCkydP8PTpU/z9998w5Ht2vmE/MBEVTefPA127AtWqAX/8kXli6e4uTzxfvQL++ku+fQjfpIiIiPSKIAhYtmwZWrRogSdPngAAjh8/jrlz50ocWdHC5JKIio60NGD/fqB
ZM6B5c/n1zHh6AidPAgEBwMCBgKVl/sVJREREGouJiUGPHj0wceJEJCcnK9WtWbMGb9++lSiyoofDYomo8PvwQb435a+/yofBZsbYWD7s9Ztv5Av1EBERkV4LCgqCt7c3HqpZhM/DwwM7duyAlZWVBJEVTUwuiajwevtWvkDPb7/Jh7VmxtoaGDUKGDcOKFs238IjIiKi3BEEAWvXrsX48eORlJSkUj9jxgz8+OOPMDJiupOf+NsmosInIUG+8uv8+UBUVObtKlQAJk0Chg4FihfPv/iIiIgo1969e4eRI0dix44dKnW2trbYunUrOnbsKEFkxOSSiAqPpCRg/Xrgp5+Aly8zb1e7NjBlCtC7t3woLBERERUIwcHB6NmzJ0JDQ1XqmjRpAh8fH1SoUEGCyAjggj5EVBikpsrnVFavDowZk3li2aYN8M8/wLVr8rmVTCyJiIgKjI0bN6Jhw4ZqE8tvvvkGp0+fZmIpMfZcElHBJQjyFV9nzgRu3cq8XffuwHffybcVISIiogLHx8cHQ4YMUSm3trbGpk2b0KVLFwmioozYc0lEBU96UlmvHtCtW+aJZfv2wJUrwJ49TCyJiIgKsO7du6NJkyZKZfXr10dQUBATSz3C5JKICg5BAPbtkyeVXbsCV6+qb9ekCXDqlHwIbP36+RkhERER5QFjY2P4+PjAzs4OADB27FicO3cOjo6OEkdGijgsloj0X1qavKfyxx+B69czb1e7NjB3LvD554BMln/xERERUZ6rUKECtm3bhtjYWPTq1UvqcEgNJpdEpL8EAdi7V55U3riReTtnZ2D2bPnqrwYckEFERFRQ3b9/HykpKahWrZra+g4dOuRzRJQT/BRGRPpHEIBjx4CGDYEePTJPLF1c5KvE3r4N9O3LxJKIiKgA2717N9zd3dG9e3fExcVJHQ7lAj+JEZF+uXxZvmVI+/ZAQID6NtWqAdu3yxfy6d8fMDTM3xiJiIhIZ5KSkjB+/Hh4e3vj3bt3uHPnDsaMGQNBEKQOjXKIySUR6Ydbt+QrvzZuLF+MR53q1YEdO4CbN+U9lUwqiYiICrSwsDC0aNECy5cvVyrfsmULdu7cKVFUlFtMLolIWo8fA4MHyxfj2bdPfRtnZ3lSGRwM9OnDpJKIiKgQOHDgANzc3ODv769SN2DAAHh5eUkQFWmDySURSSMqCpg8WZ44bt4sXxE2o/LlgXXr5L2aTCqJiIgKheTkZHz77bfo0qULYmJilOqKFSuGP//8E5s3b4aFhYU0AVKucbVYIspfiYnAihXyLUOio9W3sbMDpk8HxowBzMzyNz4iIiLKM8+ePUPv3r1x4cIFlTonJyf4+vqiTp06EkRGusDkkojyR1oasHMn8N13QFiY+jaWlsCkScA33wBWVvkaHhEREeWtf/75B1988QUiIiJU6nr37o0//vgDVnz/L9A4LJaI8t6pU/JtRfr3V59YGhsDX38NPHgg39OSbyxERESFRmpqKr7//nt07NhRJbE0MTHBypUrsWPHDiaWhQB7Loko74SFAePHAwcOZN6md2/5ENkqVfItLCIiIsofCQkJ6NSpE06pWQne0dERvr6+qFevngSRUV5gzyUR6V5yMrBgAVCjRuaJZYsWwKVL8qGyTCyJiIgKJTMzM5QpU0alvGvXrggKCmJiWcgwuSQi3Tp3DnBzA6ZNA96/V62vVg3Yvx84fRpo1Cj/4yMiIqJ8I5PJsHbtWlSrVg0AYGRkhCVLlsDPzw/W1tbSBkc6x+SSiHQjMhIYNkzeI3nrlmp9yZLA6tXyvSq9vACZLP9jJCIionxnaWkJX19fuLi44OzZs5g4cSJk/BxQKHHOJRFpRxCAv/6S71mpZvU3yGTA6NHyeZX8hpKIiKjQio+Pz3Rvypo1a+LWrVsw5J7VhRp7Loko9+7cAVq1AgYPVp9Y1q0rn1e5ciUTSyIiokJKEAT8+uuvcHJywrNnzzJtx8Sy8GNySUQ5l5Ag36+yTh353Mm
MLC2BZcuAK1fkW5AQERFRoRQVFYUuXbpgypQpePnyJfr06YPk5GSpwyKJMLkkopw5cgSoWROYN0++KmxGPXoAISHyLUiMOPKeiIiosPL394e7uzsOHjwolp0/fx7fffedhFGRlJhcEpFmnj8HvL2Bzz8HHj1Sra9UCTh0CNi9GyhXLt/DIyIiovwhCAKWL1+O5s2b4/Hjx0p1MpkMFhYWEARBouhISuxWIKKspaTI50zOnAnExanWGxkBU6bI683N8z8+IiIiyjexsbEYOnQo9uzZo1Ln4OCAbdu2oV27dhJERvqAySURZS4wEBgxAggKUl/fooV8exFX1/yNi4iIiPLd1atX4e3tjQcPHqjUtWjRAjt37kTZsmUliIz0BYfFEpGqd++AiRPli/GoSyzt7ICNG+WL+TCxJCIiKtQEQcDatWvRpEkTtYnl9OnTcfLkSSaWxJ5LIsrgwAFg7Fjg6VP19UOHAgsWyBNMIiIiKtTi4uIwcuRIbN++XaXO1tYWW7ZsgaenpwSRkT5icklEcs+fA19/Dfj5qa+vUQNYuxZo3jx/4yIiIiJJ3Lx5E97e3ggJCVGpa9y4MXx8fFCxYkUJIiN9xWGxREVdaqp8wZ7q1dUnlqamwM8/A1evMrEkIiIqQvz8/NQmlhMnTsTp06eZWJIK9lwSFWUhIfJhrhcuqK9v0wZYswaoWjV/4yIiIiLJfffddzhz5gxOnDgBAChRogQ2btyIbt26SRwZ6Sv2XBIVRcnJwC+/AHXrqk8s7e2Bv/4Cjh1jYklERFREGRoaYtu2bShTpgzc3d0RFBTExJKyxJ5LoqLm2jVgyBD5MFd1vvwSWLSIC/YQERERSpUqhRMnTsDR0RHFihWTOhzSc+y5JCoqEhOBmTOBBg3UJ5ZVqgAnTwIbNjCxJCIiKiI+fPiAcePGITAwMNM21atXZ2JJGmHPJVFRcOmSfG7l7duqdQYG8j0t58wBzM3zPzYiIiKSxIMHD+Dt7Y2rV6/iyJEjCAwMhLW1tdRhUQHGnkuiwiw5Gfj+e6BZM/WJpasrcPEi8OuvTCyJiIiKED8/P7i7u+Pq/0czPXz4EEOGDIEgCBJHRgUZk0uiwurhQ6BlS/k2ImlpynVGRsAPPwCBgUDDhtLER0RERPkuKSkJEyZMQI8ePfD27Vulur///ht37tyRKDIqDDgslqgw2rYNGD0aePdOta5ePWD9eqBOnfyPi4iIiCTz+PFj9OrVC/7+/ip11atXh6+vL2rUqCFBZFRYsOeSqDB5+xYYMAD44gvVxNLICJg3Tz7/koklERFRkXLo0CG4ubmpTSy/+OIL+Pv7w9XVVYLIqDBhzyVRYXH5MtCvn3w4bEZVqwLbt8tXiiUiIqIiIyUlBTNnzsSCBQtU6kxNTbFixQoMHToUMplMguiosGFySVTQpaUBCxbIF+5JTVWt//JLYPlywNIy/2MjIiIiyTx//hx9+/bF2bNnVeqqVq0KX19f1K1bN/8Do0KLySVRQZaYCAweDOzcqVpnZQWsXQv06ZPvYREREZG0jh07hv79+yM8PFylztvbG3/++SesrKwkiIwKMyaXRAVVVBTQtSug5ttING0qX9SnUqX8joqIiIgkFhwcjA4dOqhsK2JsbIwlS5bgq6++4jBYyhNc0IeoIHr0SJ5AZkwsDQyAWbOA06eZWBIRERVRtWrVwvDhw5XKKlWqhPPnz2Ps2LFMLCnPMLkkKmiuXAEaNwbu3lUut7QEDh8GZs+WrwxLRERERdayZctQ5/+rw3t5eSEoKAgNuLAf5TF+AiUqSA4elM+hTEhQLi9bVp5YclI+ERERATAzM4Ovry8OHTqECRMmsLeS8gV7LokKilWr5HMsMyaWNWvK965kYklERFSkREREqF0JNp2TkxMmTpzIxJLyDZNLIn0nCMDUqcBXX8m3HVHUpg1w7hxQoYI0sREREZEkLly4ADc3N3Tu3BkP1e1xTSQBJpdE+kwQgEmTgIULVesGDQK
OHAFKlMj/uIiIiEgSgiBg8eLF8PDwwLNnzxAbGwtvb298+PBB6tCImFwS6S1BAKZPB5YtU62bNQvYuBEwMcn3sIiIiEga0dHR6NatGyZPnoyUlBSxPCgoCLNnz5YuMKL/44I+RPpq9mxgwQLlMkNDYN064MsvJQmJiIiIpBEQEABvb2+EhYWp1H366aeYMGFCvsdElBF7Lon00dy5wJw5ymUyGfDXX0wsiYiIihBBELBixQo0a9ZMJbGUyWSYOXMmjh8/jtKlS0sTIJEC9lwS6ZtffwVmzlQt37AB6Ncv/+MhIiIiSbx9+xbDhg2Dr6+vSp29vT22bt2KDh06SBAZkXpMLon0yfLlwJQpquVr1wKDB+d7OERERCSN69evo2fPnrh//75KXbNmzbBz506UL19egsiIMsdhsUT6Ys0aYPx41fIVK4ARI/I/HiIiIsp3giBg3bp1aNSokdrE8ttvv8WpU6eYWJJeYs8lkT7YsAEYPVq1fPFi+f6WREREVOgJgoChQ4di48aNKnU2NjbYvHkzOnfuLEFkRJphckkktTVr1CeW8+bJ97gkIiKiIkEmk6F69eoq5Q0aNMCuXbtQqVKl/A+KKAcKdXJ5/PhxHDhwAMHBwYiIiIClpSUqVqyItm3bok+fPihevLhOzxcZGYk9e/bg3LlzuH//Pt6+fQtDQ0PY2NjA2dkZrVq1gpeXFywsLHR6XirAli0DJk5ULZ89W77HJRERERUp33zzDc6ePYuDBw8CAL7++mssWrQIJtzbmgoAmSAIgtRB6FpcXBwmT56MU6dOZdqmdOnSWLp0Kdzd3XVyzoMHD+LHH3/Eu3fvsmxXsmRJ/Prrr2jUqFGuz9WmTRsAwIkTJ3J9DNIDc+eqXxV2+nR5nUyW/zERERGR5KKiotCqVSv88MMP6NGjh9ThkJ7Tp9yg0CWXKSkpGDFiBM6fPw9Avkyzt7c3qlatitjYWBw6dAhBQUEAACsrK2zfvh1OTk5anfPkyZMYM2YM0n+Vzs7O8PT0RNmyZZGUlIRHjx7Bz88P0dHRAABTU1Ps3r0bzs7OuTqfPj2BKBcEQZ5UzpunWjd7NvDDD0wsiYiICrl3795lOYouLS0NBgZce5Oyp0+5QaFLLrdv344ff/wRAFC1alVs3rwZ9vb2Sm0WLFiADRs2AADc3d2xY8cOrc7Zvn17PH78GAAwevRojB8/HrIMyUFCQgJGjx6NS5cuAQDatWuHFStW5Op8+vQEohwSBPk8ymXLVOsWLAC+/TbfQyIiIqL85ePjg9GjR+PgwYNo1qyZ1OFQAadPuUGh+jokNTUVq1atEm8vXLhQJbEEgMmTJ4uTpYOCgnD27Nlcn/Px48diYmlvb49x48apJJYAYG5uju+++068feXKlVyfkwqotDT5wj3qEsvff2diSUREVMglJibiq6++Qp8+fRAdHY3evXsjPDxc6rCIdKZQJZf+/v7iC7Rhw4ZwdXVV287Q0BADBgwQbx8+fDjX54yMjBSvV6hQAYaGhpm2VVzhKyEhIdfnpAIoJQX48ktg7VrlcpkM+PNPYOxYaeIiIiKifPHw4UM0a9ZMqSPk+fPnGDBgANLS0iSMjEh3ClVyqdgD2bJlyyzbenh4iNfPnDmT63Mq9ow+e/Ysy38O6T2cALSe50kFSFoaMHgw8NdfyuWGhsDWrcDQoZKERURERPlj3759cHd3R2BgoEpdmTJlkJSUJEFURLpXqJLL0NBQ8XqtWrWybGtvb48yZcoAkPc+RkVF5eqcFStWFBfmCQ8PV/o2StGHDx/wyy+/iLeHMqEoOqZPB7ZtUy4zNgZ27QL69ZMmJiIiIspzycnJ+Oabb9CtWzfExsYq1ZmZmWHDhg3YuHEjihUrJlGERLpVqPa5fPTokXi9fPny2bYvX748Xr58CUA+VMHW1jZX5/3pp58wZMgQxMfH4/fff8exY8fQsWNHcbXYsLAw+Pn5ITIyEkZGRpgyZQo+//zzXJ2LCphVq4C
FC5XLihUD9uwBPD2liYmIiIjy3JMnT9C7d29xMUdFLi4u8PX1zbYzhKigKVTJpeIekzY2Ntm2t7a2VnvfnKpbty527dqF77//HkFBQQgJCUFISIhSG5lMhn79+mHAgAGoXLlyrs9FBcjBg8C4ccplhobA3r3AZ59JExMRERHluSNHjmDAgAFqR8b17dsXa9euzXIbEqKCqlANi1VcJMfU1DTb9opt4uPjtTp31apVMWPGDDRv3lxtvSAI2Lt3L9atWyfud0mF2JUrQJ8+8vmWitatY2JJRERUSKWkpGDGjBn4/PPPVRJLExMTrFmzBtu2bWNiSYVWoeq5lMr79+8xc+ZMHDp0CKamphg3bhw6duyIChUqIDk5GaGhodixYwf2798PPz8/BAYGYv369ahQoYLUoVNeePgQ6NQJyLgi8KxZ8hVjiYiIqNB58eIF+vbtq3ahyMqVK8PX1xfu7u4SREaUfwpVcmlubi5Olk5MTISRUdYPLzExUbxuYWGRq3OmpaVhxIgR8Pf3h7GxMTZt2qT0j8PExARubm5wc3ODi4sLFi5ciMePH2PKlCnYuXNnrs5JeiwyEujYEXjzRrl80CB5cklERESFUkBAgNrEsnv37tiwYQNKlCghQVRE+atQDYtVHGKgydDTmJgYtffNiX///Rf+/v4A5P88svpGasiQIXB0dAQAXL16FTdu3MjVOUlPffgAdOkCKKxaDABo2xb44w/5npZERERUKHl5eWHixInibWNjYyxbtgy7d+9mYklFRqFKLtMTN0C+52R2FNvkdpGdU6dOidebNWuWZVuZTIYmTZqIt69fv56rc5IeSksDBg4Ezp9XLq9VC9i9GzAxkSYuIiIiyjfz589Ho0aNULFiRZw9exbjx4+HjF8uUxFSqJLL9P0mASA4ODjLthEREeI2JHZ2drnehuSNwvBHS0vLbNtbWVmJ1xMyzsmjgmvmTMDXV7msXDngyBGA31YSEREVCSYmJtizZw+uXr2KRo0aSR0OUb4rVMllixYtxOvqxrwrOn36tHjdw8Mj1+dUTChfvHiRbfvnz5+L1xW3QqEC7J9/gF9+US4rXlyeWGqw3yoREREVDGlpaZg/fz62bduWaZty5crlutOCqKArVMllw4YN4eDgAADw9/fHrVu31LZLTU3Fli1bxNueWmxmr9hbeujQoSzbvn37VimprV27dq7PS3ri5UtgwADlMiMjYM8egH9fIiKiQiMyMhKdO3fG9OnTMWLECNy+fVvqkIj0TqFKLg0NDTFmzBjx9tSpUxEZGanS7tdff8WdO3cAAO7u7ko9noouX74MFxcXuLi4oHXr1mrbeHp6wsBA/mu8dOkSli9fDkEQVNrFxcVh4sSJePv2LQDAxcUF1apVy9kDJP2SliZPLMPDlcsXLQLatZMmJiIiItK5S5cuwc3NDUeOHAEgn9rk7e2t9T7pRIVNodqKBAB69eqF48eP4/z587h37x66dOkCb29vVK1aFTExMTh8+DACAwMByFeInTNnjlbnq1KlCgYPHowNGzYAAFauXIlTp06hY8eOKF++PFJSUnD37l3s378f4f9PQkxMTDBr1ixO8C7oFiwATpxQLuvUCRg/Xpp4iIiISKcEQcCyZcvw7bffIiUlRanuzp07OH78OLp06SJRdET6p9All0ZGRli+fDkmT56MU6dOITw8HKtWrVJpV7p0aSxduhROTk5an/Pbb7+FiYkJ1q1bh9TUVNy+fTvToRIODg5YsGAB6tWrp/V5SUIXLgDff69cVq4csHEjtxwhIiIqBGJiYjBkyBDs3btXpa5kyZLYvn072rRpI0FkRPqr0CWXgHyRnTVr1uD48ePYv38/goODERkZCQsLC1SsWBHt2rVDnz59cr23ZUYymQwTJ05E9+7dsWfPHly5cgWPHj1CXFwcDAwMYGNjg2rVqqFly5bo0qWLRqvKkh6LigL69gVSUz+WGRgA27YB9vbSxUVEREQ6ERgYCG9vbzx
69EilzsPDAzt27ECZMmUkiIxIv8kEdRMESa+lf0t2IuOQTMp7ggD06AFk/BZz9mxg1ixJQiIiIiLdEAQBq1evxsSJE5GUlKRS/91332H27NkwMiqU/TNUQOlTbsBXBlFOrF6tmlh6eMj3uSQiIqIC6927dxg+fDh8fHxU6uzs7LBlyxZ07NhRgsiICg4ml0Saun4dmDRJuczOTj4c1tBQmpiIiIhIazdu3IC3tzdCQ0NV6po0aQIfHx9UqFBBgsiICpZCtRUJUZ6Jjwd69wYSE5XLN2+WL+RDREREBVJsbCw8PDzUJpbffPMNTp8+zcSSSEN5llzGxcXhwYMHCAoKwpUrV/LqNET5Y+xY4O5d5bKJE4HPP5cmHiIiItKJEiVKYN68eUpl1tbW2LdvH3799VcYGxtLFBlRwaPTYbFxcXHYuXMnDh48iHv37iF9rSCZTKayNUdkZCTWr18PAHB2dkbXrl11GQqR7mzeDGzapFxWrx7wyy+ShENERES6NWrUKJw+fRo+Pj6oX78+du3aBUdHR6nDIipwdJZc+vv7Y/LkyQgPDwcAZLcIrZ2dHS5duoQ7d+7AysoKnp6eMDEx0VU4RLpx+zYwZoxyWfHiwM6dgKmpNDERERGRTslkMqxbtw41atTA1KlTYcr3eKJc0cmw2ICAAAwbNgzh4eFiUlmlShU4ODhkeb++fftCEAS8ffsWFy5c0EUoRLqTkAD06iX/qWjdOqBqVWliIiIiolx5//49Dh06lGl98eLF8cMPPzCxJNKC1sllYmIiJk2ahKSkJAiCgK5du+L06dM4fPgw2rdvn+V927VrBwMDeQhMLknvfP01cOuWctnIkfKFfYiIiKjAuHfvHpo0aQIvLy8cP35c6nCICi2tk8vdu3fjzZs3kMlk6Nu3L+bPn4+SJUtqdF9ra2txPHvGOZlEktq6Ffj/nGBR7drA0qXSxENERES54uvri3r16uH69esQBAH9+vXDixcvpA6LqFDSOrk8efIkAMDCwgKTJ0/O8f2rVKkCQRDw+PFjbUMh0o2QEGDUKOUyCwtg1y7AzEyamIiIiChHEhMTMW7cOPTq1Qvv3r0Ty8PDwzFs2DAJIyMqvLRe0Cc0NBQymQz169eHhYVFju9fokQJAFB60RNJ5v17+TzL+Hjl8rVrARcXaWIiIiKiHAkLC0OvXr3UbodXo0YN/PrrrxJERVT4ad1zGRMTAwAoVapUru6fvgBQWlqatqEQaW/CBCA4WLls2DCgf39JwiEiIqKcOXDgANzc3NQmlgMHDoS/vz9q1KghQWREhZ/WyaW5uTkA4MOHD7m6/+vXrwHI518SSWrnTuCPP5TLatYEfvtNmniIiIhIY8nJyfj222/RpUsXsfMjXbFixbB+/Xps2rQpVyPtiEgzWg+LdXBwQGxsLO7du5fj+yYnJ+PatWuQyWQoX768tqEQ5d79+8Dw4cpl5ubyeZb//wKFiIiI9NOzZ8/Qu3dvtbsPODk5Yffu3ahdu7YEkREVLVr3XNarVw8AEBISgidPnuTovvv27RPnWjZs2FDbUIhyRxDkiWVcnHL56tVA9erSxEREREQa+eeff+Dm5qY2sezduzcCAgKYWBLlE62Ty88++wyAfO7kjz/+qPHcydDQUCxYsAAAIJPJ0KlTJ21DIcqdDRuA//5TLhs8GBg4UIpoiIiISEOLFi1Cx44dERERoVRuYmKClStXYseOHbCyspIoOqKiR+vkskmTJmjQoAEEQcCFCxcwevRovHr1KtP2ycnJ2LZtG/r164e4uDjIZDJ06NABVatW1TYUopx79QrIuIVOxYrA779LEw8RERFpTF2PpKOjIy5cuIAxY8ZAJpNJEBVR0aX1nEtA/q1Rz549ERkZiTNnzqBNmzaoW7cuoqOjxTbz589HWFgYrly5goSEBHGV2PLly+PHH3/URRhEOTd+PJBh0j/WrAEsLSUJh4iIiDTXoUMHfPfdd/j5558BAF27dsXGjRu5UCS
RRGRCepanpQcPHmDcuHF4+PCh/MCZfFOkeDonJyesWrUKFSpU0EUIRUabNm0AACdOnJA4kgLu0CGgc2flsr59ge3bpYmHiIiIciw1NRWenp7o2LEjxo8fz95KKnL0KTfQSc8lAFSpUgV+fn5Yv349tm/fjsjIyEzbWllZYeDAgRgyZIi4lQlRvnr3Dhg9WrnM1hZYtkyScIiIiChzsbGxsLKyUps4Ghoa4ujRozAw0Hq2FxFpSWfJJSDfQ+irr77CyJEjcfPmTVy7dg2vX79GXFwczMzMYG9vj9q1a8Pd3R0mJia6PDVRznz3HfDsmXLZ4sVAyZLSxENERERqnTt3Dn369MEPP/yAESNGqG3DxJJIP+g0uRQPamSEunXrom7dunlxeCLtXLoErFihXNamDTBokDTxEBERkYq0tDQsXrwY06dPR2pqKr7++ms0aNAAbm5uUodGRJng1zxUtCQlyfe0VJxqbGYGrF0LcI4GERGRXoiKikLXrl3x7bffIjU1FQCQmJgIb29vxMbGShwdEWVG657Lgf/fC7Bdu3YYMGBAju+/Y8cOHD16FDKZDJs3b9Y2HKKsLVoE3LypXDZ7NlCliiThEBERkTJ/f3/06tULjx8/VqmrVKkSkpKSJIiKiDShdXLp7+8PmUwGZ2fnXN3/yZMn4jGI8lRoKPDTT8pldesCkyZJEg4RERF9JAgCfv/9d0yePBnJyclKdTKZDLNmzcLMmTNhaGgoUYRElJ08mXNJpHfS0oARI4DExI9lBgbAn38CRnwZEBERSSk2NhZDhw7Fnj17VOocHBywfft2tG3bVoLIiCgnJP9Unb7vJb+Fojy1ZQtw+rRy2YQJQL16koRDREREclevXoW3tzcePHigUteyZUvs2LEDZcuWlSAyIsopyRf0CQ8PBwDud0l5JzYW+PZb5bJKlYA5cyQJh4iIiOQdDGvXrkWTJk3UJpbTp0/HiRMnmFgSFSCS9ly+evUKZ8+ehUwmQ/ny5aUMhQqzH38E3rxRLlu5ErCwkCYeIiKiIi4uLg4jR47E9u3bVepsbW2xZcsWeHp6ShAZEWkjR8nl9OnTM627ePFilvWKUlJS8Pr1a9y4cQMfPnyATCZD48aNcxIKkWZu3wZ+/125rHNngG9YREREknn69Cn27dunUt64cWP4+PigYsWK+R8UEWktR8nl3r171a7qKggCHj58iIcPH+YqCEtLy1xtY0KUJUEAvv4aSEn5WGZqCixdKl1MREREhOrVq2PVqlUYPHiwWDZp0iT88ssvMDExkS4wItJKjudcCoKgdMmsXNOLu7s7Nm/ejDJlyuj0gRFhzx7gxAnlssmTuaclERGRHhg0aBCGDh2KEiVKwM/PD4sXL2ZiSVTAyQTFDDEbe/fuVbotCAJmzJgBmUyGJk2aoHPnztmfUCaDiYkJbGxs4OzsDDs7u5xHXcS1adMGAHAiY+JEHyUkANWrA0+efCyrUAG4c4dzLYmIiPTE+/fv8erVKzg6OkodClGBpU+5QY6GxXbr1k2lbMaMGQCAypUrq60nksT8+cqJJQAsXszEkoiIKB/t2LEDISEh+PHHH9XWm5mZMbEkKkS0Xi22a9eukMlkqFOnji7iIdLew4fAwoXKZa1aAT17ShMPERFREfPhwwdMnDgRa9asAQDUq1cPXl5eEkdFRHlN6+Ry/vz5uoiDSHcmTgQSEz/eNjSUrxirZjEqIiIi0q0HDx7A29sbV69eFcsGDRqEq1evolKlStIFRkR5LscL+hDptb//Bg4cUC4bNw5wdZUmHiIioiLEz88P7u7uSoklAMTExMDHx0eiqIgovzC5pMIjMVG+9YiikiWB2bMlCYeIiKioSEpKwoQJE9CjRw+8fftWqc7c3BybN2/G1KlTJYqOiPKL1sNiMxIEAffv38edO3cQHR2N+Ph4pKWlaXTfsWPH6jocKkqWLQPu3VMumz8fKFFCknCIiIiKgsePH6NXr17w9/dXqatevTp8fX3
hyhFEREWCzpLL5ORkrF+/Htu3b0d4eHiujsHkknLt9Wvgp5+Uyxo1AgYNkiYeIiKiIuDQoUMYOHAgoqOjVeq++OILrF69GpaWlhJERkRS0ElyGRMTgyFDhuDOnTvQZNtMmUym0k7GxVZIG4sWAfHxH2/LZMCKFYABR34TERHpWkpKCmbOnIkFCxao1JmammLFihUYOnQoP98RFTE6SS4nTJiA27dvAwDs7e3Rrl073Lx5E8HBwZDJZPjqq68QHx+P58+fIygoCBEREZDJZDAzM0P//v1hamqqizCoqHr9Gli1Srls0CCgfn1p4iEiIirEnj9/jr59++Ls2bMqdVWrVoWvry/q1q2b/4ERkeS0Ti7PnTuHS5cuQSaToWbNmti4cSMsLS3x008/ITg4GIDycNe0tDQcO3YM8+fPx6tXr3Du3DmsXbsWJUuW1DYUKqoWLQLev/9429AQ+P576eIhIiIqpARBQKdOnXDt2jWVOm9vb/z555+wsrLK/8CISC9oPWbw8OHD4vW5c+dmO67ewMAAHTp0wJ49e+Do6IiQkBBMmjRJ40V/iJRk1mtZubI08RARERViMpkMv/32GwwNDcUyY2NjrFixAj4+PkwsiYo4rZPL9G+uqlatCmdnZ43vZ2tri0WLFkEQBAQGBuLvv//WNhQqitT1Wn73nXTxEBERFXItW7bE3LlzAQCVKlXC+fPn8dVXX3F+JRFpn1ymz5/MmFgq/oNJSkpSe19XV1fUrFkTAHDw4EFtQ6Gihr2WREREkpgyZQrmzZuHoKAgNGjQQOpwiEhPaJ1cvv9/r1HGYRBmZmbi9Yyb6SpycnKCIAgIDQ3VNhQqathrSURElCfS0tKwa9euTHcBMDAwwPTp02FjY5PPkRGRPtM6ubSwsAAAJCYmKpUrJpvPnj3L9P7pvZoRERHahkJFCXstiYiI8kRERAQ+//xz9O7dG0uXLpU6HCIqQLROLsuWLQsAiIqKUiqvUqWKeD0wMDDT+4eEhACQTwYn0hh7LYmIiHTuwoULcHNzE9fCmDp1Ki5cuCBxVERUUGidXLq4uEAQBDx8+FCpvG7duuJKYjt37hSHzyo6cOAAHjx4AJlMBkdHR21DoaKCvZZEREQ6JQgCFi9eDA8PD6URZykpKejXr1+m62cQESnSOrls2LAhAODp06d48+aNWG5ra4sWLVpAEAQ8e/YM/fv3x/HjxxEWFoaQkBCsWLEC3yvsRdiuXTttQ6Gigr2WREREOhMdHY2uXbti8uTJSElJUaorXbo0Nm3aBBMTE4miI6KCxEjbA3z66acwNDREWloajh07hv79+4t1kydPxoULF5CcnIw7d+5g3Lhxao9RoUIFpfsRZYq9lkRERDpz5coV9OrVC2FhYSp1rVu3xvbt21GqVKn8D4yICiStey5tbW0xbdo0DBs2TGmFWEC+9+XKlSthaWkJQRDUXipVqoQ//vhDXBiIKEvstSQiItKaIAhYsWIFmjVrppJYymQy/PDDD/j333+ZWBJRjmjdcwkAAwYMyLSuRYsW+Oeff+Dr64uLFy/izZs3MDAwQPny5dGqVSt069aNQy1IM+y1JCIi0trbt28xbNgw+Pr6qtTZ29tj27ZtaN++vQSREVFBp5PkMju2trYYOXIkRo4cmR+no8KKvZZERERauX79Onr27In79++r1DVv3hw7d+5EuXLlJIiMiAoDrYfFEuWLiAj2WhIREWlh7969aNSokdrE8ttvv8XJkyeZWBKRVvQiuXz69ClmzpwpdRikz1auZK8lERGRFmrVqgVTU1OlMhsbGxw8eBALFizgnuNEpDVJk8uHDx9i6tSp6NixI/bs2SNlKKTP4uOB339XLuvXj72WREREOVC1alVs2LBBvN2wYUNcvXoVnTp1kjAqIipM8mXOZUahoaFYvXo1/v33X6SlpUEQBMhkMilCoYJg40YgMlK5bMoUaWIhIiIqwHr06IHx48dDJpNhwYIFXFSRiHQq18nltWv
XcOXKFbx58waJiYmws7ND3bp10axZMxgZqT/svXv38Ntvv+HkyZPiViTpSSWXuia1UlKAxYuVyzw9gVq1pImHiIhIz71//x4ymQzFihVTW7906VJ+qU9EeSLHyeWNGzfwww8/4O7du2rry5Urh19//RV169YVy96+fYv58+dj//79Yk9lutKlS2P48OHo2bNnzqOnws/XF8i4sfPUqZKEQkREpO/u3r0Lb29vNG/eHKsyLoT3f0wsiSiv5Ci5DAgIwPDhw/Hhwwe1Q1kFQcCzZ88wdOhQbNy4EbVr10ZwcDDGjRuH169fKyWVZcuWxciRI9G9e3dOICf1BAFYsEC5rFEjoEULaeIhIiLSYz4+Phg2bBji4uIQHByMFi1aoG/fvlKHRURFiMYL+iQnJ2PatGl4r7BipyAIsLKygoODg9JQ2Pj4eMydOxePHz/GkCFD8Pr1a7GufPny+Omnn/Dvv/+id+/eTCwpc8eOAdevK5d9+y3Ab1yJiIhEiYmJ+Oqrr9CnTx/ExcWJ5cOHD0dISIiEkRFRUaNxz+WxY8fw7Nkzsbdy0KBBGDRoEMqWLQsASElJwcWLF7Fo0SKEhobixo0bGDVqFN69ewcAMDc3x9dff43+/fszoSTNLFyofNvZGejSRZpYiIiI9NDDhw/Rq1cvBAYGqtQ5OjrCwEAvdp0joiJC4+Ty1KlT4vVvvvkGw4YNUz6QkRFatGgBNzc39OzZE2FhYXj06BEA+TzMP//8E46OjjoKmwq9wEDgxAnlssmT5ftbEhEREfbt24fBgwcjNjZWpW7IkCH4/fffYW5uLkFkRFRUafx11u3btwEAtra2GDx4cKbtLC0tMWrUKPG2TCbDwoULmVhSzmTstSxdGhgwQJpYiIiI9EhycjK++eYbdOvWTSWxNDMzw8aNG7F+/XomlkSU7zTuuYyIiIBMJoO7u3umW42ka9KkCQB5Yunk5IR69eppFyUVLQ8eALt3K5eNHw9ksqQ6ERFRUfHkyRP07t0bly5dUqlzcXHB7t27UbNmTQkiIyLKQc9l+gRxOzu7bNs6ODiI1ytXrpyLsKhIW7wYSEv7eLt4cUChN5yIiKgoOnLkCNzc3NQmlv369UNAQAATSyKSlMbJZWpqKgBk22sJQGnyuCbJKJHozRtg40blspEjAWtrScIhIiKSWkpKCqZPn47PP/8cUVFRSnWmpqZYs2YNtm7dCktLS4kiJCKSy9E+l7nBjXopR37/Hfjw4eNtY2NgwgTJwiEiIpJaXFwcdu7cqVJepUoV+Pr6ws3NTYKoiIhUcX1q0h9xccDKlcplX3wBlCsnTTxERER6wNraGrt27VLayq1Hjx4IDAxkYklEeiXHPZfx8fF48eJFnrRP3zOTiqg//wSio5XLpkyRJhYiIiI90qBBAyxZsgSTJk3C4sWLMXbsWI4OIyK9IxMEQdCkYbVq1XL0Tyz9sJreRyaTidudUNbatGkDADiRcR/Igiw1FahaFQgL+1jm5QXs3y9ZSERERPpEEATcu3cPzs7OUodCRHpEn3KDXA2LFQQh24tMJhMTS03aa5jjUmH1zz/KiSXAXksiIipSzpw5g0GDBomLKGYkk8mYWBKRXstRcpmTJJBJI+XI6tXKt93cgGbNpImFiIgoH6WlpWH+/Plo1aoV/vrrL8ybN0/qkIiIckXjOZf60M1KhdTjx8Dhw8plo0cDnEtCRESFXGRkJAYOHIgjR46IZbNmzULTpk3FoW5ERAWFxsllOa7YSXll3TpAsYe7eHGgb1/p4iEiIsoHFy9eRO/evfH06VOlckEQsHHjRiaXRFTgcCsSklZSknyVWEUDBwLcCJqIiAopQRCwdOlStGzZUiWxlMlkmDNnDjZv3ixRdEREuZfjrUiIdGr/fuD1a+WyUaOkiYWIiCiPxcTE4Msvv8S+fftU6kqVKoXt27ejdevW+R8YEZEOMLkkaWVcyKd5c6BmTWliISIiykOBgYHw9vbGo0ePVOo8PDy
wY8cOlClTRoLIiIh0g8NiSTohIcCpU8plo0dLEwsREVEeEQQBq1atQtOmTdUmlt999x2OHz/OxJKICjz2XJJ01q5Vvm1vD/ToIU0sREREeeDt27cYMWIEfHx8VOrs7OywZcsWdOzYUYLIiIh0j8klSSMhAdi0SblsyBDA1FSScIiIiPLCV199pTaxbNq0KXbu3IkKFSpIEBURUd7gsFiSxq5dQEyMctnIkZKEQkRElFfmzZsHOzs7pbLJkyfjv//+Y2JJRIUOk0uSRsaFfDp0ACpXliYWIiKiPFKhQgVs2bIFAGBtbY19+/Zh0aJFMDY2ljgyIiLd47BYyn9BQYC/v3IZF/IhIqJCqmPHjli7di3atWsHR0dHqcMhIsoz7Lmk/LdmjfLt8uWBzz+XJhYiIiId8PPzQ1xcXKb1I0aMYGJJRIUek0vKX7GxwLZtymXDhwNG7EQnIqKC5/379xgxYgR69OiBMWPGQBAEqUMiIpIMk0vKX1u3yleKTWdoCAwdKl08REREuXTv3j00adIE69atAwBs2bIF69evlzgqIiLpMLmk/CMIqgv5dOkClCsnTTxERES55Ovri3r16uH69etK5ePGjcOzZ88kioqISFp5NhYxLi4Or1+/RmxsLFJTU9GgQYO8OhUVFOfOAbduKZdxIR8iIipAEhMTMXnyZKxYsUKlztzcHGvXrkX58uUliIyISHo6TS7j4uKwc+dOHDx4EPfu3RPnHchkMty+fVupbWRkpDh0xNnZGV27dtVlKKSP1q5Vvl21KtC6tTSxEBER5dCjR4/Qq1cvBAQEqNS5urrC19cX1atXlyAyIiL9oLPk0t/fH5MnT0Z4eDgAZDuh3c7ODpcuXcKdO3dgZWUFT09PmJiY6Coc0jdv3wJ+fsplI0cCBhyZTURE+m///v0YPHgwYmJiVOoGDhyIVatWwcLCIv8DIyLSIzr5ZB8QEIBhw4YhPDxcTCqrVKkCBweHLO/Xt29fCIKAt2/f4sKFC7oIhfSVry/w/v3H20ZGwMCB0sVDRESkgeTkZEyePBldu3ZVSSyLFSuG9evXY9OmTUwsiYigg+QyMTERkyZNQlJSEgRBQNeuXXH69GkcPnwY7du3z/K+7dq1g8H/e66YXBZymzYp3/b0BEqWlCQUIiIiTTx9+hQeHh5YvHixSp2TkxMuX76MIUOGQCaTSRAdEZH+0XpY7O7du/HmzRvIZDL06dMHs2bN0vi+1tbWcHR0xMOHD1XmZOrC8ePHceDAAQQHByMiIgKWlpaoWLEi2rZtiz59+qB48eI6PycAhIWF4cCBAzh79ixevHiB2NhYlChRAnZ2dqhVqxYaNmyI9u3bw8zMLE/Or3cePJAv5qNo8GBJQiEiItJEUFAQ2rdvj8jISJW6Pn364I8//sizzxFERAWV1snlyZMnAQAWFhaYPHlyju9fpUoVPHjwAI8fP9Y2FFFcXBwmT56MU6dOKZVHRUUhKioK165dw9atW7F06VK4u7vr7LxJSUlYunQptmzZguTkZKW6iIgIRERE4O7du9i9ezecnZ2LzqT/v/5Svm1nB3z+uTSxEBERacDZ2Rn29vZKyaWJiQmWLVuGUaNGsbeSiEgNrZPL0NBQyGQy1K9fP1fzDUqUKAEAePfunbahAABSUlLw9ddf4/z58wAAe3t7eHt7o2rVqoiNjcWhQ4cQFBSEV69eYeTIkdi+fTucnJy0Pm9iYiLGjRuH06dPAwAsLS3Rvn171K5dGyVKlEBMTAxev36NoKAgBAUFaX2+AiMtDdi8WbmsXz+AizcREZEes7S0hK+vLxo1aoT379/D0dFR3NuSiIjU0zq5TJ/cXqpUqVzdP30BoLS0NG1DAQDs2rVLTCyrVq2KzZs3w97eXqzv378/FixYgA0bNuDt27f44YcfsGPHDq3PO3fuXDGxbNWqFebNmwdbW1u1bWNiYorOyrhnzgAZe6UHDZImFiIiohyoVasWVq5ciQMHDmDjxo2wtra
WOiQiIr2m9YI+5ubmAIAPHz7k6v6vX78GAJ38w05NTcWqVavE2wsXLlRKLNNNnjxZHJIaFBSEs2fPanXeS5cuwcfHBwDg5uaGFStWZJpYAvLHmv57K/QyLuRTsyagw6HIRERE2khNTUV0dHSm9V9++SX8/PyYWBIRaUDr5NLBwQGCIODevXs5vm9ycjKuXbsGmUyG8uXLaxsK/P39xX02GzZsCFdXV7XtDA0NMWDAAPH24cOHtTrvunXrxOszZ86EkZHOtg8t2OLigN27lcsGDQI4T4WIiPTAmzdv0LFjR3h5eamslaCI8yuJiDSjdXKZPvcgJCQET548ydF99+3bJ861bNiwobahKPVAtmzZMsu2Hh4e4vUzZ87k+pwvX74Uh+HWqFEDNWvWzPWxCh0/PyA+/uNtAwOgf3/p4iEiIvq/s2fPws3NDceOHcO5c+cwc+ZMqUMiIirwtE4uP/vsMwDyuZM//vijxnMnQ0NDsWDBAgDybwQ7deqkbSgIDQ0Vr9eqVSvLtvb29ihTpgwAIDIyElFRUbk6Z0BAgDhvtHHjxgCAU6dOYfTo0WjevDlq1qyJZs2aYciQIdi2bRuSkpJydZ4CKeOQ2M8+A/7/OyciIpJCWloaFixYgFatWuHFixdi+cKFC3Ho0CEJIyMiKvi0Ti6bNGmCBg0aQBAEXLhwAaNHj8arV68ybZ+cnIxt27ahX79+iIuLg0wmQ4cOHVC1alVtQ8GjR4/E65oMs1Vs8/Dhw1yd8+bNm+L1SpUqYdKkSRg1ahROnjyJ8PBwJCcnIyIiAufPn8ecOXPg6emJu3fv5upcBcrjx0CGrWC4kA8REUkpMjISXl5emDZtGlJTU5XqypQpI65gT0REuaOTyYGLFi1Cz549ERkZiTNnzqBNmzaoW7eu0gT5+fPnIywsDFeuXEFCQoLY21e+fHn8+OOPughDaTsTGxubbNsrTs7P7VYo6XM8AWDDhg0ICwuDgYEBOnbsiCZNmsDc3BwPHz7E7t278erVKzx9+hQDBw7E3r17UbZs2Vyds0DIuLeltTXg5SVJKERERJcvX0avXr3UTuFp27Yttm3bhpIlS0oQGRFR4aGT5LJ06dLYvHkzxo0bh4cPHyI1NVXcyzF9Evzm/+91mJ5UAoCTkxNWrVoFKysrXYSBhIQE8bqpqWm27RXbxCvODcyB2NhY8XpYWBhMTEywdu1aNG3aVKndkCFDMGrUKPj7+yMmJgY//vgj1q5dm6tz6j1BUN3bsk8foFgxaeIhIqIiSxAELF++HFOmTFFZtEcmk2HWrFmYOXMmDA0NJYqQiKjw0HpYbLoqVarAz88P48aNg52dHQRBUHsBACsrK4wdOxY+Pj6oUKGCrkKQhGKyDACjRo1SSSwBwMLCAkuWLIGZmRkA4L///kNYWFh+hJj/zp8HHjxQLhs8WJJQiIio6IqJiUHPnj0xYcIElcTSwcEB//77L2bNmsXEkohIR3S6Z0axYsXw1VdfYeTIkbh58yauXbuG169fIy4uDmZmZrC3t0ft2rXh7u4OExMTXZ4agHzPzfSexMTExGy3BElMTBSvW1hY5OqcGe/Xp0+fTNs6ODigTZs24oIBFy9eRKVKlXJ1Xr2WsdfSxQXQwWrAREREmgoKCoK3t7faNRVatmyJHTt2FO7pKUREEsiTDRmNjIxQt25d1K1bNy8On6nixYuLyWV0dHS2CWNMTIzSfXNDcUhvmTJlYGdnl2V7V1dXMbnM6dYtBUJCArBrl3IZ97YkIqJ8IggC1q5di/Hjx6tdoX369OmYM2cO96QmIsoDOhsWqw8cHR3F68+ePcu2vWKbypUr5+qciveztLTMtr1iEhsXF5erc+q1ffuAt28/3pbJgAEDJAuHiIiKlrS0NOzevVslsbS1tcXhw4cxb948JpZERHlE6+Tyv//+U1nOWyrOzs7i9eDg4CzbRkRE4OXLlwAAOzs72Nra5uqc1apVE69rsuLsW4XES5NktMDJOCS
2bVtAg21hiIiIdMHQ0BDbtm1D6dKlxbLGjRvj6tWr8PT0lDAyIqLCT+vkctSoUWjevDl+/vlnXL9+XRcx5VqLFi3E62fOnMmy7enTp8XrHh4euT5n/fr1xeG3r169QkRERJbtb926JV7PbW+p3nr2DDh2TLmMC/kQEVE+K1WqFHbu3AlDQ0NMmjQJp0+fRsWKFaUOi4io0NPJsNiYmBhs27YNffr0QYcOHbBixQpJ5hM2bNgQDg4OAAB/f3+lRE5RamoqtmzZIt7W5ptMU1NTdOjQQby9c+fOTNuGh4fjxIkTAAADAwM0a9Ys1+fVSzt2yLchSVe8ONC1q2ThEBFR0eXh4YGQkBAsXrw4TxYRJCIiVVonl+XLl1faauTJkydYuXIlOnTogN69e2Pbtm2IiorSRazZMjQ0xJgxY8TbU6dORWRkpEq7X3/9FXfu3AEAuLu7K/V4Krp8+TJcXFzg4uKC1q1bZ3rer776SnzjWrt2LS5evKjSJj4+HpMmTcKHDx8AAJ9//nnhW6Xu8GHl2716Aebm0sRCRESF2p07d+Dl5aW033RGVatWzceIiIhIJmTcqDEXrl69ioMHD+Lo0aOIjo7+ePD/rxBqaGiI5s2bw8vLC23atIGpqam2p8xUSkoKRowYgfPnzwOQb//h7e2NqlWrIiYmBocPH0ZgYCAA+eI6O3bsgJOTk9pjXb58GQMHDgQAlCtXDidPnsz0vDt27MDs2bMByHslPT090bRpU5iZmeHhw4fw9fXFq1evxGPt3r071/M827RpAwBiL6heePsWsLMDUlI+lh08CHTqJF1MRERUKG3fvh0jRoxAfHw8unfvjt27d4ufOYiIihp9yg10klymS0lJwblz53DgwAGcPHlS7KUDPiaa5ubmaN++Pby8vNC4ceM8eTOIi4vD5MmTcerUqUzblC5dGkuXLoW7u3umbXKSXALAtm3bsHDhQqXHnZGrqytWrlyJMmXKZPMoMqdPTyDRvn1At24fb5uYAJGRQGFctIiIiCTx4cMHTJgwAWvXrlUqX7ZsGcaPHy9RVERE0tKn3ECna3EbGRnh008/xaeffor4+HgcO3YMBw8exKVLl8QVZePj47Fv3z7s27cPDg4O6NSpE7y8vJRWXdWWpaUl1qxZg+PHj2P//v0IDg5GZGQkLCwsULFiRbRr1w59+vTJ9d6Wmenfvz9atmwJHx8fnDlzBi9fvsT79+9hY2OD2rVrw9PTEx07doSBQaHaAUbu77+Vb7dowcSSiIh05v79+/D29sa1a9dU6rZu3YqvvvqKW4wQEUlMpz2XmYmIiMChQ4dw4MAB3L59WzmA//dcVq1aFQcPHszrUAoFffp2AoB8ER9HR+Dx449lixYBkydLFxMRERUae/bswZAhQ5S280o3bNgwLF++HGZmZhJERkQkPX3KDfKlC83e3h6DBw+Gn58fjhw5gpEjR6L8//c+TF8I6P79+/kRCuWFu3eVE0sA+OwzaWIhIqJCIykpCRMmTEDPnj1VEktzc3Ns3rwZ69atY2JJRKQn8n18ZuXKlTFx4kRs374d7du3z+/TU17IOCS2XDnA1VWaWIiIqFB4/PgxWrRogd9++02lrnr16vD39xfXRSAiIv2Qr5MTMs7DTEtLg0wmQz6MzKW8lDG5/OwzgKv2ERFRLh06dAgDBw5UWoE+3RdffIHVq1fDkvP6iYj0Tp4nl6mpqThz5gwOHjyIU6dOiSupKiaUVlZW+IzDKAum9++B06eVy/i3JCKiXEhOTsbMmTOxcOFClTpTU1OsWLECQ4cO5bYjRER6Ks+Sy6CgIBw4cAB///23uMGxYkJpbGwMDw8PeHl54dNPP4WJiUlehUJ56fRpQHHrFUNDoG1b6eIhIqICa9myZWoTSycnJ/j6+qJOnToSREVERJrSaXL54MEDHDhwAIcOHcKLFy8AKCeUMpkM7u7u8PLyQseOHWFlZaXL05MUMg6JbdwYsLaWJBQiIirYxo4di23
btuH69etimbe3N/78809+ZiAiKgC0Ti7fvHmDw4cP4+DBg7hz545YrphUVqlSBZ07d4aXlxfKli2r7SlJn6ibb0lERJQLZmZm2LVrF+rXr48PHz5g6dKlGDNmDIfBEhEVEFonl59++qmYSComlPb29vj888/h5eUFV64cWjg9eiTfhkQRk0siItKCs7Mztm3bhtKlS6NBgwZSh0NERDmgdXKZlpYmXjc3N0fbtm3h5eWFpk2bwsAg33c6ofz0zz/Kt+3tAXd3aWIhIqIC47///kPFihVRuXJltfWdO3fO54iIiEgXtE4ujYyM0KRJE3h5eaFdu3YoVqyYLuKigiDjkNgOHQB+oUBERJlIS0vD/Pnz8f3338PNzQ3nz5+Hqamp1GEREZGOaJ1cnjlzBra2trqIhQqSpCTgxAnlMg6JJSKiTERERGDAgAH4+/9fTAYGBuKbb77BihUrJI6MiIh0RetuJiaWRdSFC0BcnHJZ+/bSxEJERHrt/PnzcHNzExPLdCtXrsTJkyclioqIiHSNYxgpdzIOia1XDyhZUppYiIhILwmCgF9//RUeHh549uyZUp2BgQHmzZuHTz/9VJrgiIhI53S6zyUVIdyChIiIshAdHY3BgwfjwIEDKnWlS5fGjh07mFgSERUyGiWX06dPF6/LZDLMmzdPbZ02Mh6X9NiLF4DCBtcAmFwSEZHoypUr6NWrF8LCwlTqWrduje3bt6NUqVL5HxgREeUpjZLLvXv3Km1grJgEZqzTBpPLAuLff5VvlygBNG4sTSxERKQ3BEHAypUrMWnSJCQnJyvVyWQyfP/99/jhhx9gaGgoUYRERJSXNB4WKwgCAKhNJNPrtKGrBJXyQcYhsW3bAkYcYU1EVJTFxsZi+PDh8PX1ValzcHDA1q1b0Z4LvxERFWoaZQS//PJLruqoEEpNVe255JBYIqIi7dWrV2jRogXu37+vUte8eXPs3LkT5cqVkyAyIiLKTxoll926dctVHRVCV64A0dHKZR06SBMLERHphZIlS6JatWoqyeXUqVPx888/w4ijW4iIigRuRUI5k3FIrKsrUKGCNLEQEZFeMDAwwObNm1GxYkUAgI2NDQ4ePIj58+czsSQiKkKYXFLOcAsSIiJSw9bWFrt27UKLFi1w9epVdOrUSeqQiIgon2n9deLAgQMBAO3atcOAAQNyfP8dO3bg6NGjkMlk2Lx5s7bhUF6KjAT8/ZXLmFwSERUpERERsLe3V1vXqFEjnD59mov0EREVUVonl/7+/pDJZHB2ds7V/Z88eSIeg/Tc8eOA4srA5uZA8+bSxUNERPkmISEB48aNw7///ourV69mmmDy/ZyIqOjisFjS3H//Kd/28ACKFZMkFCIiyj93795F48aNsWHDBjx79gwDBgxAWlqa1GEREZGekTy5TN8jkxsqFwCnTyvfbtVKmjiIiCjf+Pj4oH79+ggODhbL/v77b8yfP1/CqIiISB9JnlyGh4cDAMzNzSWOhLL05g1w545ymYeHNLEQEVGeS0xMxFdffYU+ffogLi5Oqc7S0hKVK1eWKDIiItJXkq4P/urVK5w9exYymQzly5eXMhTKzpkzyrctLAA3N2liISKiPPXw4UP06tULgYGBKnW1atWCr68vXFxcJIiMiIj0WY6Sy+nTp2dad/HixSzrFaWkpOD169e4ceMGPnz4AJlMhsaNG+ckFMpvGYfENmsGGBtLEwsREeWZvXv34ssvv0RsbKxK3ZAhQ/D7779ztBEREamVo+Ry7969aleBEwQBDx8+xMOHD3MVhKWlZa62MaF8lLHnkkNiiYgKlaSkJEybNg1Lly5VqTMzM8Pq1asxaNAgCSIjIqKCIsfDYgXFrSg0KM+Ou7s7Zs6ciTJlyuTq/pQPoqIAhYUcADC5JCIqRJ48eYLevXvj0qVLKnXVqlWDr68vatasKUFkRERUkOQoufzll1+UbguCgBkzZkAmk6FJkybo3LlztseQyWQwMTGBjY0NnJ2
dYWdnl7OIKf+dPau8v6WZGdCggXTxEBGRzhw5cgQDBgxAVFSUSl2/fv2wdu1aWFpaShAZEREVNDlKLrt166ZSNmPGDABA5cqV1dZTIZBxvmWTJoCJiTSxEBGRTh07dkwlsTQ1NcXy5csxfPhwtdNhiIiI1NF6tdiuXbtCJpOhTp06uoiH9FHG5JJDYomICo0FCxbg4sWLuHz5MgCgSpUq8PX1hRtXBCciohzSOrnkJsqFXGwscO2aclnLlpKEQkREumdiYgIfHx+4ubmhTZs2+PPPP1GiRAmpwyIiogJI0n0uqQA4dw5IS/t428QEaNRIuniIiEjnPvnkEwQGBqJSpUocBktERLlmIHUApOcybkHSqJF8QR8iIiowXr9+ja5du+LOnTuZtnF0dGRiSUREWtGo53Lfvn1Kt7t27ZppnTYUj0t6gvMtiYgKtDNnzqBPnz54+fIl7t+/j8uXL8PCwkLqsIiIqBDSKLmcNm2a+G2mTCZTSgIV67SR8bikB+LigIAA5TIml0REBUJaWhoWLFiAmTNnIu3/0xtu3bqFMWPGYNOmTeylJCIindN4zqWguM9hDuqoALtwAUhN/XjbyEi+DQkREem1yMhIDBgwAEePHlWpO3HiBN68eYNSpUpJEBkRERVmGiWXWe1fyb0tC7GMQ2IbNAA4lIqISK9dvHgRvXv3xtOnT1Xq2rdvj61bt8LBwUGCyIiIqLDTKLn85ZdfclVHBVzG5JJbkBAR6S1BELBs2TJ8++23SElJUaozMDDAjz/+iBkzZsDAgGv5ERFR3uBWJKReQgLg769cxvmWRER6KTo6GkOGDFG7yF6pUqWwY8cOtGrVKv8DIyKiIoXJJal3+TKQnPzxtoEB0KyZdPEQEZFaAQEB6NWrFx49eqRS9+mnn2LHjh0oXbq0BJEREVFRw7ExpF7GIbHu7oCVlTSxEBGRCkEQsHLlSjRr1kwlsZTJZJg5cyaOHz/OxJKIiPJNvvZc3rx5E6dOnUJERARsbW3h4eGBunXr5mcIpCnub0lEpNf++ecfjB07VqXc3t4eW7duRYcOHSSIioiIijKtk8u4uDjMmTMHAODi4oKhQ4eqbTd37lxs3bpVqWzNmjXw8vLCvHnzYGhoqG0opCuJicClS8plTC6JiPRKhw4d0Lt3b/j4+IhlzZo1w86dO1G+fHkJIyMioqJK62Gxly5dwoEDB3Dw4EHY2tqqbePr64stW7ZAEASVy4EDB7jirL7x9wc+fPh4WyYDmjeXLh4iIlIhk8mwbt06ODs7AwCmTJmCU6dOMbEkIiLJaN1zef78eQCAoaEh2rRpo1KfmpqK33//HYD8jdDJyQnNmjXDixcv8O+//0IQBOzYsQN9+/ZFlSpVtA2HdCHjkNjatQEbG2liISKiTBUvXhy+vr4ICwuDl5eX1OEQEVERp3Vyefv2bQBA5cqVYaVmwZcLFy7gzZs3kMlkaNCgAdavXw9jY2MAwKZNmzB//nykpaVh//79mDRpkrbhkC6cOaN8m0NiiYgkc/v2bURGRqJFixZq62vXro3atWvnc1RERESqtB4WGx0dDZlMhsqVK6utP6OQqIwcOVJMLAGgX79+KFGiBAAgMDBQ21BIF5KTgQsXlMuYXBIRSWLr1q1o0KABevbsiRcvXkgdDhERUZa0Ti6joqIAQEwSMwoICAAAWFpaonHjxkp1JiYmqFOnDgRBQFhYmLahkC4EBgLx8cplLVtKEwsRURH1/v17jBgxAgMGDEBCQgLevHmDvn37IiUlRerQiIiIMqV1cvnh/wu/CIKgUhcfH4/Q0FDIZDK4u7urXRHW3t4eAPDu3TttQyFdyDjf0tUV+P/fiIiI8l5oaCiaNGmCdevWKZWfOXMGK1eulCgqIiKi7GmdXFpaWgIAwsPDVeoCAwORmpoKAKhXr57a+8tkMm1DIF3i/pZERJLZtWsX6tevj+vXr6vUjRw5EiNHjpQgKiIiIs1onVx+8sknEAQB169fFxP
JdMePHxev169fX+39IyMjAUDtYkCUz1JSgHPnlMuYXBIR5bnExESMGzcOvXv3VhnJY2Fhga1bt2LNmjUoVqyYRBESERFlT+vkMj1pjI6OxsaNG8XyBw8e4ODBgwDk8zHr1Kmj9v7pw2a5L5ceuHkTyDg8mfMtiYjy1KNHj9C8eXOsWLFCpc7V1RUBAQHo37+/BJERERHljNZbkfTo0QObN29GamoqFi9ejFOnTsHW1haXLl3C+/fvIZPJ0K1bN7XzLZ89e4YXL15AJpOhWrVq2oZC2vr/tjKiSpWA0qUlCYWIqCjYv38/Bg8ejJiYGJW6QYMGYeXKlbCwsMj/wIiIiHJB657LypUrY+TIkeKCPkFBQTh+/Dji4uIAAA4ODhgxYoTa+/7zzz/idXd3d21DIW3dvat8mwk/EVGeSE5OxuTJk9G1a1eVxLJYsWLYsGEDNm3axMSSiIgKFK17LgFg3LhxKF68ONasWaP0Junm5oZ58+bBxsZG5T6CIGDnzp0A5Iv6NG/eXBehkDZCQpRvM7kkItK55ORktG7dGucyznEH4OzsDF9fX9SuXVuCyIiIiLSjk+QSAAYPHowBAwbg4cOHiIuLQ9myZVGqVKlM27979w5jxowBAJibm8PW1lZXoVBuZey5dHGRJg4iokLM2NgYHh4eKsllnz598Mcff6B48eISRUZERKQdnSWXAGBoaPi/9u47rqnr/x/4KwQUWYKCE22dqKgV3HtiFa1WK26sq9VatbX6aR11VK11Va2rTupAUcE9ahXcVkELKg7cC0VkyN6Q3x98ub+EhJmQm4TX8/Hw8bi59+SeN8kF885533NQr169QrW1srLCgAEDNNk9qSMrC3j0SHEfRy6JiErEggULcPXqVVy4cAFlypTBmjVrMHHiRC7PRUREek3tey7JQLx+DSQnK+7jyCURUYkwNjbG3r170a5dO/z777/45ptvmFgSEZHe0+jIJemx3CWxVlacKZaISE3h4eF53iJStWpVXLlyhUklEREZDI0nl0lJSThx4gT8/f1x7949fPjwAYmJiTA3N4eNjQ0cHR3Rpk0b9O3bF+XKldN091RcuSfzcXAA+IGHiKhYMjMz8euvv2LZsmW4cuUKnJycVLZjYklERIZEo8nljh07sGHDBmEZEgDCEiWxsbGIi4vDy5cvcerUKSxfvhyTJ0/Gl19+qckQqLi4DAkRkUa8f/8eI0eOxNmzZwEAgwcPxs2bN1G+fHmRIyMiIipZGrnnMisrC1OnTsWyZcsQHx8PmUwm/JMnvz8+Ph5Lly7F1KlTldqRCFSNXBIRUZFcvnwZTk5OQmIJAE+ePMH48eP5fx0RERk8jYxcLlu2DGfOnBHKe2xsbNCnTx84OzvD3t4e5cqVQ3JyMt68eYPAwECcOnUKUVFRkMlkOHv2LJYtW4aZM2dqIhQqLo5cEhEVW1ZWFlasWIE5c+YgMzNT4ZhUKkXz5s0hk8lYBktERAZNIlPzq9SnT5+iX79+yMrKgkwmg7u7O3744Yd876dMTU3FmjVr8NdffwHInjXv6NGjqFOnjjqhlBrdu3cHAPj5+WnmhPHx2RP4yAsOBho31sz5iYgMWFRUFL788kucPHlS6VjVqlWxb98+dOrUSYTIiIioNNB4bqAGtctiDx48KHxLO378eMyZM6fAiXrKli2Ln376CRMmTACQPfHBwYMH1Q2Fiiv3+pZGRkDduuLEQkSkR65fvw5nZ2eViWWPHj1w69YtJpZERFRqqJ1cXrt2DQBQvnx5fPfdd0V67uTJk2FjY6NwHhJB7vstP/4YMDUVJRQiIn0gk8mwZs0adOzYEa9evVI4JpFIsGDBApw+fRqVKlUSKUIiIiLtUzu5DAsLg0QiQevWrWFiYlKk55qYmKBVq1aQyWQICwtTNxQqLt5vSURUaDExMfjiiy8wbdo0ZGRkKByrVKkSzpw5g/nz50MqlYoUIRERkTjUntAnMTERAIo9xXrO83LOQyLgTLF
ERIUSGBgINzc3PHv2TOlYp06d4OXlhWrVqokQGRERkfjUHrnMSQ7fvn1brOfnjFhy/S8RceSSiKhQgoODVSaWs2bNgp+fHxNLIiIq1dROLmvVqgWZTIYbN24gKiqqSM+Njo6Gv78/JBIJatWqpW4oVBxZWcoT+nDkkohIpS+//BJjx44VHleoUAEnT57EkiVLYGyskdW9iIiI9JbayWXHjh0BAGlpaZg9e7bS/Sd5yczMxJw5c5CWlqZwHtKyV6+AlBTFfRy5JCLK07p169CkSRO0bdsWQUFBcHV1FTskIiIinaB2cunm5gZLS0sAwKVLl+Du7o6Q3Pfw5fLw4UOMHj0aFy5cAABYWFhg8ODB6oZCxZH7vSpfHuDshkREeTIzM8Pp06dx8eJF1KxZU+xwiIiIdIbaNTw2Njb46aef8PPPP0MikeDWrVsYMGAAGjRoAGdnZ1SrVg3lypVDcnIywsLCEBgYiAcPHgDInspdIpFg5syZsLa2VjcUKg5V91tKJOLEQkSkAxITEzF58mQMGDAA/fr1U9mG91YSEREp08gNIoMGDUJsbCxWrVqFzMxMAEBISEieI5gymQwAIJVKMX36dHzxxReaCIOKgzPFEhEJHjx4ADc3N9y7dw9HjhxBUFAQPv74Y7HDIiIi0gtql8XmGDduHDw9PdGiRQsA2QlkXv8AoGXLltizZ4/CxAgkAs4US0QEANizZw9atmyJe/fuAchez3Lw4MHC3ABERESUP41Obefk5ARPT088ffoU/v7+ePDgAaKjo5GUlAQzMzNUqFABDRs2RJs2bVC7dm1Ndk3FxZFLIirlUlJS8N1332HLli1Kx0JCQnDnzh3hi1MiIiLKW4nMm16nTh3UqVOnJE5NmhQXB/zfOqMCjlwSUSny5MkTuLm54datW0rHmjZtCm9vb9SvX1/7gREREekhtZLLp0+fIjQ0FAkJCbCwsIC9vT2TSn2SuyTWyAjg+0dEpYSPjw/Gjh2L+Ph4pWPjx4/H2rVrUa5cOREiIyIi0k9FTi4zMjKwa9cu7NixAxEREUrH7ezsMHr0aLi7u8PExEQjQVIJyZ1c1q4NlC0rTixERFqSlpaG//3vf1i7dq3SMTMzM/z5558YNWqUCJERERHptyIll4mJifjmm29w48YNYWIeAJBIJMLj9+/fY8WKFTh//jz+/PNPWFhYaDZi0hzeb0lEpcyLFy8wZMgQBAQEKB1r2LAhfHx80KhRIxEiIyIi0n9Fmi124cKFCAgIENanzKEq0bx58yYWLlyouUhJ8zhTLBGVIsePH4ezs7PKxNLd3R03btxgYklERKSGQo9cPn78GEePHhWSx+rVq2PUqFFo1qwZypcvj9jYWAQGBsLT0xOhoaGQyWQ4fvw4xo8fz8kQdBVHLomolHj58iUGDhyIjIwMhf2mpqZYt24dxo0bp/ClKRERERVdoUcuDx06JGy3bNkSx44dw6hRo9C0aVN89NFHaNq0KUaPHo1jx44pTNl+5MgRjQZMGpKZCTx+rLiPI5dEZKA++ugjLFq0SGFfvXr1cP36dYwfP56JJRERkQYUOrnMmabd2NgYK1asgJmZmcp2ZmZmWL58OYyNswdFg4KC1I+SNO/lSyA1VXEfRy6JyID9+OOPcHV1BQAMHjwYN2/exCeffCJyVERERIaj0Mnl69evIZFI0KxZM1SpUiXfttWqVYOzszNkMhlev36tdpBUAnLfb2ljA9jZiRMLEZEWGBkZYdeuXdi2bRv27dsHKysrsUMiIiIyKIVOLmNjYwFklxYVRs2aNQEAcXFxxQiLSlzu5NLBAWBZGBHpuXfv3uHgwYN5Hq9YsSLvryQiIiohhU4u09PTAWRPflAYZf9vvcSc55GOyT2ZD++3JCI9d/78eTRr1gxDhw7FtWvXxA6HiIio1CnSUiRkQFSNXBIR6aGsrCwsXrwYPXr0QHh4ODIyMjB48GBERkaKHRoREVGpwuSytOIyJERkACIiIuDq6oq
5c+ciKytL2B8aGooff/xRxMiIiIhKn0Kvc0kGJDYWePdOcR/LYolIz1y9ehVDhgzBmzdvlI716tULy5cvFyEqIiKi0qvIyeWRI0dw/vz5AtvlTAAEAN27dy+wvUQiga+vb1HDoeLIXRIrlQJ16ogTCxFREWVlZeH333/HrFmzkJmZqXDMyMgIixcvxk8//QQjIxbnEBERaVORk8vExEQkJiYWqm3ObHxv377Nt51MJiuRmft8fX1x7NgxBAcHIzIyEhYWFqhZsyZ69OiBoUOHwtLSUuN95rZ27Vps2LBBeDxgwAAsXbq0xPvNV+7ksnZtoEwZcWIhIiqC6OhojB49GsePH1c6VqVKFXh5eaFLly7aD4yIiIiKllzKZLKSikOjEhISMGPGDKUR1ujoaERHR+PWrVvw9PTE6tWr4ezsXGJxhISEYMuWLSV2/mLj/ZZEpIcCAgIwePBgvHz5UulY9+7dsWfPHlSuXFmEyIiIiAgoQnL522+/lWQcGpORkYGpU6fi6tWrAABbW1u4ubmhbt26iI2NxYkTJxAYGIh3795hwoQJ2Lt3L+rVq6fxODIzMzF79mykp6fDzMwMSUlJGu+j2HKPXPJ+SyLSYTKZDOvWrcOMGTOUlreSSCSYN28e5s6dC6lUKlKEREREBBQhuRwwYEBJxqExBw4cEBLLunXrYufOnbC1tRWOjxgxAsuWLYOHhwfi4uIwb948eHl5aTyObdu24d69ezA3N8e4ceOwdu1ajfdRbBy5JCI9Mn78eHh4eCjtt7Ozw549e+Di4iJCVERERJSbQc12kJmZiY0bNwqPly9frpBY5pgxYwYaNmwIAAgMDMTly5c1GsfTp0+xfv16AMAPP/yAqlWravT8asnMBB4/VtzHkUsi0mG9e/dW2tehQwcEBQUxsSQiItIhBpVcBgQEICIiAgDQqlUrODo6qmwnlUrh7u4uPD558qTGYsjKysKcOXOQlpaGZs2aYfjw4Ro7t0a8eAGkpSnu48glEemwQYMGYerUqcLjn376CefPn0f16tVFjIqIiIhyM6h1LuVHIDt16pRv286dOwvbly5d0lgMu3btQlBQEExMTLB48WLdmwo/9/2WFSoAKkZ3iYh0yYoVK/Do0SNMnjwZffr0ETscIiIiUsGgkstHjx4J202aNMm3ra2tLapWrYqwsDBERUUhOjoaFSpUUKv/V69eYc2aNQCAr7/+ukQmClKbqvstS2AZGCKiogoLC8vzNoIyZcrg77//1nJEREREVBQ6NqymnufPnwvb9vb2BbaXb/Ps2TO1+pbJZJgzZw6Sk5NRp04dTJw4Ua3zlRjOFEtEOmjnzp2oW7cu9u3bJ3YoREREVEwGlVzGx8cL2zY2NgW2t7a2Vvnc4ti7dy8CAgIgkUiwaNEilClTRq3zlRjOFEtEOiQpKQnjxo3D6NGjkZSUhK+++goPc38JRkRERHrBoJJL+bUky5YtW2B7+TaJiYnF7vft27f4/fffAQDDhg1D8+bNi32uEseRSyLSEQ8fPkSbNm0UlhlJSEiAm5ubbq0NTERERIViUMmlWObOnYvExERUrlwZ06dPFzucvMXGAuHhivs4cklEIti3bx9atGiB4OBgpWMdOnTQvcnQiIiIqEAG9b+3mZmZsJ2amlpge/k25ubmxerTx8cHV65cAQDMnz8fFhYWxTqPVrx4ofhYIgFq1RIlFCIqnVJSUjBp0iQMGzYMCQkJCscsLCzg5eWFjRs3wtTUVKQIiYiIqLgMarZYS0tLxMbGAgA+fPhQYMIYExOj8NyiCg8Px7JlywAAvXr1Qvfu3Yt8Dq2Sm/AIAFCtGlCI8mEiIk149uwZ3NzcEBgYqHSsSZMm8Pb2hgOrKYiIiPSWQSWXtWrVQmhoKAAgNDS0wBljc9oCQO3atYvc3+nTpxEXFwcge2mTjRs3qmz34MEDYfvhw4dCOzs7O7i5uRW532LLPXLJUUsi0pLDhw9jzJgxwheA8saNG4e1a9c
qVJ8QERGR/jGo5LJ+/fq4fPkyACA4OBht2rTJs21kZCTCwsIAABUrVizWGpcymUzY9vT0LNRz7t+/j/v37wMAGjRooN3kMvfI5ccfa69vIiqV0tLSMHPmTKxevVrpWLly5fDnn3/iyy+/FCEyIiIi0jSDuueyY8eOwvalS5fybXvx4kVhu3PnziUWk07hyCURadGrV6/QuXNnlYllgwYNEBAQwMSSiIjIgBjUyGWrVq1gZ2eHiIgIBAQE4N69e3B0dFRql5mZid27dwuPXV1di9Xf6NGjMXr06ALbHTp0CLNmzQIADBgwAEuXLi1Wf2rjyCURaVF4eDj+++8/pf3Dhw/H5s2bdXsCNCIiIioyjY9c3rlzB7/++isGDhyIdu3aoXHjxmjUqJFSu7i4OFy8eBEXL15ESEiIRvqWSqWYNGmS8Pinn35CVFSUUruVK1cK90E6OzsrjHjK8/f3h4ODAxwcHNCtWzeNxCgamYwjl0SkVS1bthTWAAay1xbevHkzPD09mVgSEREZII2NXEZHR2PWrFkK5ag59yRKJBKl9uXKlcPcuXMREREBe3t7nD17ViNxDB48GL6+vrh69SoeP36M/v37w83NDXXr1kVMTAxOnjwpfJNuaWmJhQsXaqRfnRcdDcTHK+7jyCURlbDJkyfj0qVLuHXrFry9vdGsWTOxQyIiIqISopHkMjw8HMOGDUNYWJjCJDf5MTExwbBhw/DHH38gNDQUt27d0siHDmNjY6xduxYzZszA+fPnERERoXIW1ypVqmD16tWoV6+e2n3qhdwlsVIpUKOGOLEQUakhkUiwfft2yGQylC9fXuxwiIiIqARppCz2u+++w9u3byGTyVCnTh2sWrUK//77L4YPH57v8/r06SNsX7lyRROhAMheiHvTpk3YsGEDevbsiapVq6JMmTKwsbHBJ598ghkzZuDEiRNwdnbWWJ86L3dJbI0agLFB3XJLRCLIzMzEggULsGTJkjzbWFlZMbEkIiIqBdTOLnx9fXHr1i1IJBI0b94cW7duRbly5QCoLoeVV7NmTVSpUgXh4eG4deuWuqEo6dGjB3r06FHs57du3RoPHz5UO46BAwdi4MCBap9HLZzMh4g0LDw8HCNGjICfnx+MjIzQpk0b/b8/nYiIiIpN7ZHLv//+G0D2ZDpLly4VEsvCatCgAWQyGV7kHlkjzeJkPkSkQRcvXkSzZs3g5+cHAMjKysLw4cOF9YOJiIio9FE7ucwZtXRycoK9vX2Rn29jYwMge0IgKkEcuSQiDcjKysJvv/2Gbt264d27dwrHIiMjcfnyZZEiIyIiIrGpXRabkxR+XMxkxcTEBACQlpambiiUH45cEpGaoqKi4O7uLlSsyKtevTr279+P9u3bixAZERER6QK1Ry5z7qvMzMws1vNjYmIAZE/4QCVE1RqXHLkkoiL4999/0axZM5WJ5aeffoqgoCAmlkRERKWc2sllhQoVAABv3rwp1vODg4MhkUhQqVIldUOhvLx/DyQnK+7jyCURFYJMJsOqVavQuXNnhIaGKhwzMjLC4sWLcerUKdjZ2YkUIREREekKtctiGzdujNDQUNy+fRtxcXFFGoG8du0awsLCIJFISteyINqW+35LExOgalVxYiEivfHhwweMGTMGR48eVTpWuXJleHl5oWvXriJERkRERLpI7ZHLnA8Wqamp+PPPPwv9vISEBCxcuFB4rM6SIVSA3MnlRx8BUqk4sRCRXrh58yaaN2+uMrHs0qULbt26xcSSiIiIFKidXPbp0wc1atQAAOzYsQNbtmyBTCbL9zmPHz/GyJEj8fz5c0gkEjg6OqJdu3bqhkJ54f2WRFQEKSkp6NevH57n+mJKIpHg559/hq+vL6pUqSJSdERERKSr1C6LNTY2xuLFizFu3DhkZmZi9erVOHz4MHr16oWnT58K7c6dO4eXL1/iypUruHbtmpCAmpqa4tdff1U3DMpP7pFL3m9JRPkwNTXFtm3b0KdPH2G
fra0tPD098emnn4oYGREREekytUcuAaB169ZYsWIFTE1NIZPJ8OLFC2zatAn+/v7CbLLffvstli9fjn///RdZWVmQyWQwMzPD6tWr4eDgoIkwKC9choSIisjV1RWzZs0CALRv3x5BQUFMLImIiChfGkkuAaB37944cOAAWrduDZlMpvAPgNLjVq1aYf/+/ejSpYumQqC85B65ZFksERXCwoULsXHjRpw/fx729vZih0NEREQ6Tu2yWHn16tXDzp07ERISgkuXLiEoKAjv379HQkICypUrh4oVK+KTTz5Bly5d0LRpU012TXnJygJevlTcx5FLIgKQmJiIvXv3Yvz48UKViTxjY2N88803IkRGRERE+kijyWWOBg0aoEGDBiVxaiqqt2+B9HTFfRy5JCr17t+/Dzc3N9y/fx9GRkYYN26c2CERERGRntNYWSzpqNz3W5qaApUrixIKEemG3bt3o2XLlrh//z4AYPLkybhz547IUREREZG+Y3Jp6FTdb6mi/I2IDF9ycjK++uorjBo1CklJScL+lJQUjBs3rsBlpIiIiIjyUyJlsaRDuAwJEQF49OgR3NzcVI5QNmvWDF5eXirvuyQiIiIqLLWTyxs3bmgiDgBAy5YtNXYu+j+5y2J5vyVRqXPgwAGMHz8e8fHxSscmTJiANWvWwNTUVITIiIiIyJConVy6u7tr5NtuiUQi3P9DGsSRS6JSKzU1FTNmzMD69euVjpmbm2PLli0YPny4CJERERGRIdJIWSzv09FhHLkkKpWeP3+OwYMH4+bNm0rHHB0d4ePjw1m9iYiISKPUTi4LW8oqk8kQHx+PFy9eIDU1FRKJBGXKlOF6lyUpIwN4/VpxH0cuiQze0aNHMXr0aMTExCgdGz16NDZs2AAzMzPtB0ZEREQGTe3kcvfu3UVqn56eDj8/P6xatQqvX79GjRo18Msvv8DExETdUCi30FAgM1NxH5NLIoO2atUqTJ8+XWl/uXLlsGHDBowZM0aEqIiIiKg00PpSJCYmJujVqxcOHTqEhg0b4vDhw1i4cKG2wygdct9vaWEBVKggTixEpBUuLi5Kk/M4ODjA39+fiSURERGVKNHWubSwsMCKFSsgkUjg4+ODa9euiRWK4cp9v2WtWlzjksjANWnSBBs3bhQeDxs2DDdu3ECTJk1EjIqIiIhKA9GSSwCoU6cOPvnkEwDA/v37xQzFMOUeueRkPkSlwpgxYzBhwgRs2rQJe/bsgaWlpdghERERUSmgkdli1VGrVi0EBQUhODhY7FAMD5chITJY4eHhqFixIoyNVf8Z37Rpk5YjIiIiotJO1JFL4P8vYxIRESFyJAaIy5AQGSQ/Pz80bdoUP//8s9ihEBEREQlETy5v374NAJwWvyRw5JLIoGRmZmLhwoVwcXHB+/fvsWzZMpw4cULssIiIiIgAiJxc7tmzB8+ePYNEIkG9evXEDMXwpKYCb98q7uPIJZHeev/+PXr37o358+cLFR8AMGrUKLx8+VLEyIiIiIiyafWey6ysLMTExODBgwc4dOgQTp06JRzr06ePNkMxfK9eAXIfQAEwuSTSU5cvX8bQoUPxNvcXRgDatWsHCwsLEaIiIiIiUqR2ctmwYcNiPzfn2/fGjRvDzc1N3VBIXu77La2ts/8Rkd7IysrCihUrMGfOHGRmZiock0qlWLJkCWbMmAEjI9HvcCAiIiJSP7mUyWSQSCQKZVpF0aVLFyxbtgxSqVTdUEge77ck0mtRUVEYNWqUQoVHjmrVqmH//v3o0KGDCJERERERqaaRstjCJpbGxsYwNzdH9erV0bRpU/Tt2xctWrTQRAiUG2eKJdJb169fx+DBg/H69WulYy4uLvD09ESlSpVEiIyIiIgob2onlyEhIZqIgzSNI5dEekcmk+GPP/7A//73P2RkZCgck0gk+OWXXzB79mxWehAREZFO0uqEPqRFTC6J9EpMTAzGjh2Lw4cPKx2rVKkS9u7di+7du4sQGREREVH
hqJ1cJiQkCNucsVCHsCyWSK+kpKTg2rVrSvs7d+4MLy8vVK1aVYSoiIiIiApP7SkGW7RogZYtW6JXr15KsxmSSJKSgPBwxX0cuSTSaVWqVIGXl5fCzK9z5syBr68vE0siIiLSC2onlzn3/rRo0YL3AekKVQuqf/SR9uMgoiLp0qULFi1ahIoVK+LUqVNYvHgxjI159wIRERHpB7WTy4oVKwIArKys1A6GNCT3/ZZ2dgBLlon0wsyZM3H37l307t1b7FCIiIiIikTt5LJmzZoAgPfv36sdDGkI77ck0kkymQweHh5wd3fPcwknIyMjVKlSRcuREREREalP7eTSxcUFMpkMN2/eRHJysiZiInVxplginZOYmIgxY8Zg3Lhx8PT0xLp168QOiYiIiEij1E4uBw4ciCpVqiAxMRG///67JmIidXHkkkinPHjwAK1bt8bOnTuFfTNmzEBAQICIURERERFpltrJpaWlJVatWgULCwvs2bMH8+bNQ3x8vCZio+LiyCWRztizZw9atmyJe/fuKexPT0/HP//8I1JURERERJpX6GkIjxw5AgCoXbs2mjZtqrR/xIgR2LZtG7y9vXHs2DG0b98ejo6OqFChAkxNTQvVx+eff17owCkfuZNLjlwSaV1KSgq+++47bNmyRemYpaUltm3bhsGDB4sQGREREVHJKHRyOXPmTEgkEowYMUIhuczZn0MmkyElJQXnzp3DuXPnCh2IRCJhcqkJcXFAdLTiPo5cEmnVkydP4Obmhlu3bikda9q0Kby9vVG/fn3tB0ZERERUgtQuiwWyE8qcf6r2FfYfaUDu+y0BrnFJpEU+Pj5wdnZWmVh+9dVXuH79OhNLIiIiMkhqr849YMAATcRBmpI7uaxaFShkWTIRFV9aWhr+97//Ye3atUrHzMzMsGnTJri7u4sQGREREZF2qJ1c/vbbb5qIgzSFk/kQad2LFy8wePBg3LhxQ+lYw4YN4ePjg0aNGokQGREREZH2aKQslnQIlyEh0roxY8aoTCzd3d1x48YNJpZERERUKjC5NDQcuSTSus2bN8PCwkJ4bGpqiq1bt2Lnzp0wNzcXMTIiIiIi7WFyaWg4ckmkdfXr18e2bdsAAPXq1cP169cxfvx4hZm0iYiIiAyd2vdcko55/17xsb29OHEQlTJDhgxBcnIyBg4cCCsrK7HDISIiItK6IieXZ8+exaNHjzQeiEQiwc6dOzV+3lInIUHxMT/kEmlEZmYmNm3ahPHjx6Ns2bIq24wePVq7QRERERHpkCInl+/fv8f73KNjapLJZCwf0wSZTDm55P1eRGoLCwvD8OHDceHCBYSEhGDdunVih0RERESkc4p8z6VMJtP4P9KQlJTsBFOe3CQjRFR058+fh5OTEy5cuAAAWL9+PQ4cOCBuUEREREQ6qMgjl02bNkWnTp1KIhZSV+5RS4DJJVExZWVlYcmSJZg/fz6ysrIUjn3zzTfo3bs3LC0tRYqOiIiISPcUK7mcPHlyScRC6lKVXLIslqjIIiIiMHLkSJw5c0bpmL29Pfbv38/EkoiIiCgXLkViSBITlfeZmWk/DiI9duXKFTg5OalMLHv37o2goCC0a9dOhMiIiIiIdBuTS0OSe+TSzAww4ltMVBhZWVlYsWIFunTpgjdv3igcMzIywpIlS3DixAnY2tqKFCERERGRbuM6l4Ykd3LJ+y2JCiU6OhqjR4/G8ePHlY5VqVIF+/btQ+fOnUWIjIiIiEh/MLk0JLnLYplcEhXI398fQ4YMwcuXL5WOde/eHXv27EHlypVFiIyIiIhIv7Bm0pBwjUuiIjl+/Dg6duyolFhKJBLMnz8f//zzDxNLIiIiokLiyKUhYVksUZG0a9cOVatWxatXr4R9dnZ22LNnD1xcXESMjIiIiEj/FGnkUiaTlVQcpAksiyUqkooVK+LAgQMwMTEBAHTs2BG3bt1iYklERERUDIUeufTz8wMAWDBh0V0siyUqstatW2PlypUICwv
DokWLYGzMgg4iIiKi4ij0p6jq1auXZBykCSyLJVIpISEBaWlpqFChgsrjU6dO1XJERERERIaHE/oYEpbFEikJDg5GixYtMGLECGRlZYkdDhEREZHBYnJpSFgWS6Rgx44daN26NR4+fIjTp09j6dKlYodEREREZLCYXBoSjlwSAQCSkpIwduxYjBkzBsnJycL+uXPn4uLFiyJGRkRERGS4OHOFIeE9l0R4+PAhBg0ahLt37yoda9asGezt7UWIioiIiMjwceTSkLAslko5Ly8vtGjRQmViOWnSJFy9ehV16tQRITIiIiIiw8eRS0PCslgqpVJSUjBt2jRs2rRJ6ZiFhQW2bt2KoUOHihAZERERUenB5NKQsCyWSqGnT5/Czc0NQUFBSseaNGkCb29vODg4iBAZERERUenCslhDwrJYKmUOHToEZ2dnlYnluHHjcP36dSaWRERERFrC5NKQsCyWSom0tDR8//33+OKLLxAXF6dwrFy5ctixYwe2bdsGMzMzkSIkIiIiKn1YFmsoZDKOXFKpcu3aNaV9DRo0gLe3Nxo3bixCRERERESlG0cuDUVycnaCKY8jl2SgypQpgwMHDsDGxkbYN2LECNy4cYOJJREREZFImFwaitwlsQCTSzJoH330EXbt2gVTU1Ns2bIFu3fvhgWveSIiIiLRsCzWUOQuiQVYFksGQSaTQSKRqDzWt29fPH/+HFWqVNFyVERERESUG0cuDYWq5JKTmZCeO3PmDLp3746kpKQ82zCxJCIiItINTC4NRe6yWHNzwIhvL+mnzMxMzJ8/H7169cL58+fx7bffih0SERERERWA2Yeh4EyxZCDCw8PRs2dPLFy4ELL/m6Rqx44d+Ouvv0SOjIiIiIjyw+TSUHCNSzIAFy5cQLNmzXDu3DmlY//8848IERERERFRYTG5NBS5Ry6ZXJIeycrKwpIlS9C9e3e8e/dO4ZhUKsWKFSvg5eUlUnREREREVBicLdZQsCyW9FRkZCTc3d1x+vRppWPVq1fH/v370b59exEiIyIiIqKi4MiloWBZLOmhf//9F05OTioTy08//RRBQUFMLImIiIj0BJNLQ8GyWNIjMpkMK1euROfOnREaGqpwzMjICIsXL8apU6dgZ2cnUoREREREVFQsizUULIslPfHhwweMHj0ax44dUzpWuXJleHl5oWvXriJERkRERETq4MiloWBZLOmJBQsWqEwsu3btilu3bjGxJCIiItJTTC4NBctiSU8sWrQI9erVEx5LJBLMnTsXZ8+eRZUqVUSMjIiIiIjUweTSULAslvSElZUVvL29YWpqCltbW/z9999YuHAhpFKp2KERERERkRp4z6WhYFks6ZFPPvkEBw4cgJOTE+zt7cUOh4iIiIg0gCOXhoIjl6RDZDIZduzYgbCwsDzbfPbZZ0wsiYiIiAwIk0tDwXsuSUckJCRg1KhRGDNmDIYNG4aMjAyxQyIiIiIiLWByaShYFks64P79+2jVqhU8PT0BABcvXsT8+fNFjoqIiIiItMGg77n09fXFsWPHEBwcjMjISFhYWKBmzZro0aMHhg4dCktLS430k5qaiuvXr+P69esIDg7G8+fPERsbCxMTE1SoUAGNGzdGr1690KNHD5iYmGikTyUsiyWR7d69GxMnTkRSUpLC/t9++w0jR45Ew4YNRYqMiIiIiLTBIJPLhIQEzJgxA+fPn1fYHx0djejoaNy6dQuenp5YvXo1nJ2d1err5MmTmDt3LhJzjxwCSE9PR1JSEkJDQ3H69GnUr18fK1euhIODg1p9qsSyWBJJcnIypk6dim3btikds7KygoeHBxNLIiIiolLA4JLLjIwMTJ06FVevXgUA2Nraws3NDXXr1kVsbCxOnDiBwMBAvHv3DhMmTMDevXsV1twrqjdv3giJpY2NDdq1a4emTZvCzs4OGRkZePDgAY4cOYIPHz7g0aNHGDVqFLy8vFC7dm2N/LwAAJmMZbEkikePHsHNzQ137tx
ROubk5ARvb2/UqVNHhMiIiIiISNsMLrk8cOCAkFjWrVsXO3fuhK2trXB8xIgRWLZsGTw8PBAXF4d58+bBy8tLrT4/+eQTjB8/Hl27dlUqe+3fvz8mTJiASZMmITAwEDExMViwYAF27dqlVp8KkpOzE0x5LIulEnbgwAGMHz8e8fHxSscmTpyI1atXw9TUVITIiIiIiEgMBjWhT2ZmJjZu3Cg8Xr58uUJimWPGjBlCmV5gYCAuX75c7D4HDx6MAwcOoGfPnnneT2ljY4O1a9cKH7T9/f0RGhpa7D6VqCjJ5cgllZTU1FRMmTIFQ4YMUUoszc3NsWfPHvz5559MLImIiIhKGYNKLgMCAhAREQEAaNWqFRwdHVW2k0qlcHd3Fx6fPHmy2H1aW1sXqp2dnR1atmwpPH748GGx+1SS+35LgMkllYjnz5+jQ4cOWL9+vdIxR0dH3Lx5E8OHDxchMiIiIiISm0GVxcqPQHbq1Cnftp07dxa2L126VGIxyTOXK1VNTU3V3IlVJZflymnu/EQA7t27h/bt2yM2Nlbp2JgxY7B+/XqYmZmJEBkRERER6QKDGrl89OiRsN2kSZN829ra2qJq1aoAgKioKERHR5dobIBifNWqVdPciXOXxZqbA0YG9daSDnBwcECzZs0U9pUrVw4eHh7w8PBgYklERERUyhlUBvL8+XNh297evsD28m2ePXtWIjHl8Pf3F/qoUKFCgclvkXAZEtICY2NjeHl5oVKlSgCyk01/f3+MGTNG5MiIiIiISBcYVHIpP7mIjY1Nge3l75dUNeOlpqSkpGDBggXC4wkTJkAqlWqug9zJJWeKpRJStWpVeHl5YeTIkbhx44ZmvyQhIiIiIr1mUPdcJiUlCdtly5YtsL18m0RVM65qyKxZs4RRyyZNmmDEiBGa7YBrXJIGZWRkICwsDDVq1FB5vFu3bujWrZuWoyIiIiIiXWdQI5e6aNWqVTh16hSA7JHS1atX57lkSbGxLJY05O3bt+jevTu6deuGuLg4scMhIiIiIj1iUMml/IQihZmNVb6NeQmUkv7555/YvHkzAMDKygoeHh55jgaphWWxpAF+fn5wcnLCpUuX8OTJE4wfPx4ymUzssIiIiIhITxhUcmlpaSlsf/jwocD2MTExKp+rCVu2bMGaNWuEc2/fvj3PdTfVxrJYUkNmZiYWLlwIFxcXvH//Xtjv7e2tcj1LIiIiIiJVDCq5rFWrlrAdGhpaYHv5NrVr19ZYHFu2bMHvv/8OALCwsMD27dvRtGlTjZ1fCUcuqZjev3+PXr16Yf78+UqjlDVr1kSrVq1EioyIiIiI9I1BJZf169cXtoODg/NtGxkZibCwMABAxYoVUaFCBY3EIJ9YmpubY9u2bfjkk080cu488Z5LKoZLly6hWbNm8PX1VTrWt29fBAUFoXXr1iJERkRERET6yKCSy44dOwrbly5dyrftxYsXhe3OnTtrpH/5xNLMzAzbtm2Dk5OTRs6dL5bFUhFkZWVh2bJl6Natm/AFSw6pVIply5bh6NGjGvvChYiIiIhKB4NaiqRVq1aws7NDREQEAgICcO/ePZX3OWZmZmL37t3CY1dXV7X73rp1q1Ji6ezsrPZ5C4VlsVRIUVFRGDVqlDCDsbxq1aph//796NChgwiREREREZG+M6iRS6lUikmTJgmPf/rpJ0RFRSm1W7lyJR48eAAAcHZ2VhjxlOfv7w8HBwc4ODjku67f9u3bsXLlSgDZieXWrVvRvHlzdX6UouHIJRXC9evX4eTkpDKx7NmzJ4KCgphYEhEREVGxGdTIJQAMHjwYvr6+uHr1Kh4/foz+/fvDzc0NdevWRUxMDE6ePIn//vsPQPYsrgsXLlSrP29vbyxfvlx4PHToUMTExKi8j01erVq1UKdOHbX6FvCeS8qHTCbDH3/8gf/973/IyMhQOGZkZIQFCxZg9uzZkEqlIkVIRERERIbA4JJLY2NjrF27FjNmzMD58+cRERGBjRs
3KrWrUqUKVq9ejXr16qnVX06imsPDwwMeHh4FPm/y5MmYMmWKWn0LWBZL+ZBIJLhz545SYlm5cmXs3bs331F5IiIiIqLCMrjkEshe/mPTpk3w9fXF0aNHERwcjKioKJibm6NmzZpwcXHB0KFDNb62pWhYFksFWL9+PW7cuIG7d+8CyJ7EysvLC1WrVhU5MiIiIiIyFBJZ7sXtSOd1794dAODn55e9o2pV4N27/9/gzBnAxUWEyEiXPXz4EC1btsTUqVOxYMECGBsb5HdLRERERKWKUm4gIn66NAQsi6X/I5PJIJFIVB5zcHDAkydPUKlSJS1HRURERESlgUHNFlsqyWQsiyUAwJ07d9CuXTu8fPkyzzZMLImIiIiopDC51HfJydkJpjwml6WKTCaDh4cHWrdujevXr2Pw4MFIS0sTOywiIiIiKmWYXOq73CWxAMtiS5HExESMGTMG48aNQ0pKCgAgICAAP/74o8iREREREVFpw3su9V3ukliAI5elxIMHD+Dm5oZ79+4pHfv333+RnJyMcuXKiRAZEREREZVGHLnUd7lHLiUSgAmFwduzZw9atmypMrGcMmUKLl++zMSSiIiIiLSKI5f6LndyaWYGGPE7A0OVkpKC7777Dlu2bFE6Zmlpie3bt8PNzU2EyIiIiIiotGNyqe84U2yp8eTJE7i5ueHWrVtKxz755BN4e3ujXr162g+MiIiIiAgsi9V/XOOyVPD29oazs7PKxPLrr7/GtWvXmFgSERERkaiYXOq73MklRy4NSmpqKqZOnYrBgwcjPj5e4Zi5uTk8PT2xefNm3l9JRERERKJjWay+Y1msQfP09MS6deuU9js6OsLb2xsNGzYUISoiIiIiImUcudR3LIs1aGPGjEHv3r0V9o0aNQr+/v5MLImIiIhIpzC51HccuTRoRkZG2LVrF+zt7WFqaort27djx44dMOeXCERERESkY1gWq+94z6XBs7W1xcGDB2FqaoqmTZuKHQ4RERERkUpMLvUdy2INwunTp2FtbY02bdqoPN6qVSstR0REREREVDQsi9V3LIvVaxkZGfj555/h6uqKwYMHIyoqSuyQiIiIiIiKhcmlvmNZrN4KCwuDi4sLfv31V8hkMrx+/RqjRo1CVlaW2KERERERERUZk0t9x7JYvXTu3Dk4OTnhwoULCvtPnTqFgwcPihMUEREREZEamFzqO5bF6pWsrCwsWrQILi4uCA8PVzhmbGyM1atXY9CgQSJFR0RERERUfJzQR9+xLFZvREREYOTIkThz5ozSsRo1auDAgQN5TuhDRERERKTrOHKp71gWqxeuXLkCJycnlYmlq6srgoKCmFgSERERkV5jcqnvWBar07KysrB8+XJ06dIFb968UTgmlUrx22+/4fjx46hYsaJIERIRERERaQbLYvUdRy51VnR0NL788kucOHFC6VjVqlWxb98+dOrUSYTIiIiIiIg0j8mlPpPJOHKpo2JiYuDs7IyXL18qHevRowf27NmDSpUqiRAZEREREVHJYFmsPktOzk4w5TG51AnW1tbo16+fwj6JRIIFCxbg9OnTTCyJiIiIyOAwudRnuUtiAZbF6pAVK1agZcuWAAA7OzucOXMG8+fPh1QqFTkyIiIiIiLNY3Kpz3KXxAIcudQhZcuWxYEDB9CvXz/cunULPXr0EDskIiIiIqISw+RSn+UeuZRIgHLlxImllJLJZHj69Gmexz/++GMcPXoU1apV02JURERERETax+RSn+VOLs3MACO+pdoSHx+PESNGoFmzZnj06JHY4RARERERiYqZiD7jTLGiCQ4ORsuWLeHl5YWEhAQMGjQIycnJYodFRERERCQaJpf6LPfIJZNLrdixYwdat26Nhw8fCvuCg4MxZcoUEaMiIiIiIhIXk0t9lju55EyxJSopKQljx47FmDFjlEYpy5cvjz59+ogUGRERERGR+IzFDoDUwLJYrQkJCYGbmxvu3r2rdMzZ2Rne3t6oXbu2CJE
REREREekGjlzqM5bFasXevXvRokULlYnlpEmTcPXqVSaWRERERFTqceRSn7EstkSlpKRg2rRp2LRpk9IxCwsLbN26FUOHDhUhMsMlk8mQnp6OrKwssUMhIiIiKjFGRkYwMTGBRCIROxSNYnKpz1gWW2KePn0KNzc3BAUFKR1r0qQJvL294eDgIEJkhikpKQmxsbGIj49HZmam2OEQERERlTipVApLS0uUL18eZmZmYoejEUwu9RnLYkvE4cOHMXr0aMTFxSkdGzduHNauXWswfwB0QXx8PEJDQ2FiYgJra2uYm5vDyMjI4L7JIyIiIgKyK7WysrKQmJiIuLg4xMTEwN7eHpaWlmKHpjYml/qMZbElIjQ0VCmxNDMzw59//olRo0aJFJVhSkpKQmhoKKysrFCtWjUmlERERFRqmJubw87ODm/fvkVoaCg++ugjvR/A4IQ++oxlsSVi8uTJGDRokPC4YcOGCAgIYGJZAmJjY2FiYsLEkoiIiEoliUSCatWqwcTEBLGxsWKHozYml/qMI5clQiKRYNu2bahTpw5GjBiBgIAAODo6ih2WwZHJZIiPj4eVlRUTSyIiIiq1JBIJrKysEB8fD5lMJnY4amFZrD7jyKVaZDJZnklN+fLlcf36dVSsWJGJTwlJT09HZmYmzPmlCBEREZVyZmZmiIqKQnp6OsqUKSN2OMXGkUt9xgl9iu3Nmzfo2rUrzp8/n2cbW1tbJpYlKGe5ESMj/hkiIiKi0k0qlQKA3i/Hxk91+oxlscVy5swZNGvWDBcvXsSwYcPw7t07sUMq1ZjAExERUWlnKJ+HmFzqM5bFFklmZibmzZuHXr16ITIyEgAQHh6OYcOGcW1FIiIiIiI18Z5Lfcay2EJ79+4dhg8frrIM9vnz58L0z0REREREVDwcudRnuUcuWRar0oULF+Dk5KQysfzss88QFBTExJKIiIiISE1MLvWVTJb9Tx5HLhVkZWVhyZIl6N69u9J9lVKpFCtXrsTRo0dhY2MjUoRERERERIaDZbH6StVMUkwuBZGRkXB3d8fp06eVjlWvXh379+9H+/btRYiMiIiIiMgwceRSX6laYJVlsQCAq1evolmzZioTy08//RRBQUFMLImIiIiINIzJpb7KPXIpkQDlyokTi46QyWRYuXIlOnfujDdv3igcMzIywuLFi3Hq1CnY2dmJFCERlSanTp3CuHHj0K5dOzg6OsLBwQEODg44dOiQ2KFREQQGBqJBgwZo2LAhQkJCxA6HiHTEhQsX4ODggMaNG+P58+dih6MzWBarr3KPXJqbA6V8MfqAgAD873//U9pfpUoVeHl5oUuXLtoPisgApKWlwc/PD/7+/rh9+zYiIyMRExMDIyMjWFpaolq1amjQoAFat26NLl26wJxVFFi4cCH27NkjdhikpszMTPzyyy+QyWTo378/GjRoUOjn/vXXX1i6dKnweNWqVejTp0+hnnvo0CHMmjULADBgwACF82j6eYmJiTh79iyuX7+O4OBgREdHIz4+HmXLloWNjQ0aNmyI5s2bw9XVFZUqVSpUHIXh6+uLY8eOITg4GJGRkbCwsEDNmjXRo0cPDB06FJaWlhrrCwCePn0KLy8vBAQEICwsDCkpKahYsSIaNGiAPn36oE+fPjAqxOeozMxMPH36FHfv3sW9e/dw9+5dhISEICUlBUDR3i99p+33UFN93rlzB8HBwQgODsbjx4/x4cMHfPjwAenp6bCyskKdOnXQunVrDBw4ENWqVcvzPF26dEGrVq0QEBCAxYsXY/v27Zr8UfUWk0t9lXvkkh/m0Lp1a8ycOVPhj3rXrl2xd+9eVKlSRcTIiPRTVlYWPD09sXXrVrx//15lm5SUFEREROD27dvYv38/ypYti88//xwTJkxA9erVtRyxbrh//76QWFpbW8Pd3R21a9dGmTJlAACNGjUSMzwqgiNHjiAkJARGRkaYOHFikZ578OBBhcc+Pj6FTi61ITMzEx4eHti+fTs
+fPigdDw9PR0JCQl4/fo1zpw5g2XLlqF37974/vvvUbNmzWL3m5CQgBkzZijN4B4dHY3o6GjcunULnp6eWL16NZydnYvdT47MzEysXr0a27ZtgyzXF/NhYWEICwvD+fPnsWfPHqxZs6bAzwvff/89zpw5o3Zc+kzb76Gm+/zyyy+RlJSk8lhUVBSioqIQEBCAzZs3Y/LkyZgwYUKe55o0aRICAgJw5coV/Pvvv2jXrl3RfzgDw+RSX3GmWJUWLVqEq1ev4sqVK/j5558xf/58SKVSscMi0jtxcXGYPn06Ll26JOyrVq0a2rRpg4YNG8La2hpSqRQfPnzAq1evEBAQgJCQEKSmpmL//v2IiYnB2rVrRfwJxHPhwgVh++eff8Znn30mXjBUbBkZGdiwYQMAwMXFBbVq1Sr0c2/duoXHjx8r7Lt27RpCQ0Nhb2+v0TiLIyYmBtOmTcO///4r7Pvoo4/QsWNH1K5dGzY2NkhOTkZERARu3LiBGzduIDU1FSdPnkRKSgo2btxYrH4zMjIwdepUXL16FQBga2sLNzc31K1bF7GxsThx4gQCAwPx7t07TJgwAXv37kW9evXU+lkXLlyIffv2AcieKb53795o06YNzM3NERoaimPHjuHx48cICgrC2LFj4eXlhfLly+d5vszMTIXH1tbWsLa2xosXL9SKU1+I8R6WRJ8VK1ZE06ZNUbduXdja2sLOzg4ymQxv3rzBhQsXEBgYiLS0NKxatQrp6emYPHmyyvO0bdsWjo6OuHfvHtasWcPkEkwu9VfukUsmlwAAY2Nj7Nu3D3fv3kXPnj3FDodIL6Wnp2PSpEm4ceMGgOz/hGfPno3evXvn+2XNq1evsHv3buzfv19boeok+aWPHB0dRYyE1HHy5Enh/v1hw4YV6bk+Pj7C9sCBA3Ho0CHIZDIcOnQIU6dO1WicRZWeno5vvvkGgYGBALI/qM+dOxeffvopJBKJUvuJEyciJiYGf/31F3bt2qVW3wcOHBAShLp162Lnzp2wtbUVjo8YMQLLli2Dh4cH4uLiMG/ePHh5eRW7vytXrgiJpZmZGbZu3YoWLVootBk3bhzmzp2LgwcP4unTp/j999+xcOHCPM/ZtGlT1KlTB46OjnB0dESNGjUUypHFIN+/n59fiX6Boe33sCT63L9/P+rVq6fyegeACRMm4MiRI5g5cyZkMhn+/PNPuLm5oXLlyirbDx06FHPnzsXt27dx8+ZNpWustCndN+nps1JcFnvr1i14e3vnebxatWpMLInUsGrVKiGxrF69Onx8fNC3b98CqwBq1qyJOXPm4MiRIxorhdJHaWlpwnZOKSzpn7179wLIvm+/TZs2hX5eUlISTp06BQCwt7fHnDlzYGZmBiA7CchStZSYFv3+++9CYlmtWjXs378fvXr1yvODNpA9Ojdt2jT4+PgUexQqMzNTYcRz+fLlCglCjhkzZqBhw4YAsidTunz5crH6A4CdO3cK2999953KD/1SqRS//PKLUOrr4+OD169f53nOiRMnYvr06ejVqxdq1KhR7Nj0kRjvYUn0Wb9+/XyvdwD4/PPPhbk6MjIy8j2fq6ur8Lee99ozudRfpbAsViaTYevWrWjTpg1GjRqFO3fuiB0SkcEJDw+Hp6cngOxZlletWpXvhAaq1K5dG6NHj863TWpqKvbt24cJEyagc+fOaNKkCZo3b46+ffti8eLFBc685+/vL8y+um7dOiH2nElTnJyc4OzsjP79+2P9+vVISEgoVOzFjUs+nsOHDwv7u3fvLux3cHDAzJkzVT7/9evXWLZsGT7//HO0atUKjRs3RocOHTB+/Hh4eXkpJKyFfT1evHiBpUuXok+fPmjRooXCMVXtX79+jaVLl6J3795wcnJC27Zt4e7ujn/++UfpXrWnT59iwYIF6NWrFz755BO0atUKo0ePhp+fX6FeZ6BkroH8fuaiePbsGW7dugUguyS2oA+i8v7++28kJiYCAPr37w8LCwt
8+umnALLv8csZgRGD/O+3RCLBihUrijTKVadOHUybNq1YfQcEBCAiIgIA0KpVqzxH9aVSKdzd3YXHJ0+eLFZ/WVlZCAgIAJD9s37++ed5tjUxMRHK1zMzM4UvB0iRtt9DsfrMIf9FSlRUVJ7tLCwshCXu/Pz8EB8fr3bf+oxlsfqqlI1cJiQk4JtvvhH+UwQANzc33Lx5s0RmIyMqrfbs2SMkMp07d0azZs003kdAQABmzJiB8PBwhf1paWl4/PgxHj9+jL179+K7777LdyIFeVeuXMH06dMRExOjsD8kJAQhISE4duwYdu3ale9kHSURV2Fs2bIFa9euRXp6usL+iIgIRERE4PLly9i2bRs2bNhQ6NlKjx49innz5gkzWBbk3Llz+N///qeQhCclJSEgIAABAQEYPnw45s2bB4lEAh8fHyxYsEAh3pSUFFy7dg3Xrl3DpEmT8N133+XbX0m81kX9mfNz9uxZYbtVq1ZFeq58SWxOQvP5558LXzr4+PigY8eOasdYHHv27BHet44dO2q1fE9+5KdTp075tu3cubOwLX/fd1HExMQI10KFChVgbW2db/vatWsL2+fPn9fo77ih0PZ7KFafOV6+fClsqxotlde6dWucP38eqampuHTpkk5N3qVtTC71VSkaubx37x4GDRqktL7Yo0ePsHLlSvzyyy8iRUZkeK5cuSJs5/dNf3FdvHgR3377LdLT0yGRSNCuXTt06NABVapUQVpaGu7evYujR48iLi4Oq1atAoACP+Q9ePAAHh4eSE9Px4ABA9C8eXOYm5vjxYsX8PLywvv37/Hy5Uv89NNPCmVymoyrXr16wuQvu3btgr+/P4DsyUQqVqwotKtatapCv+vXr1cYWevRowc6duwIKysrvHr1CocPH8aLFy8QGhqK4cOHw9vbG3Xq1Mn39QgKCsKmTZsgkUgUXo/Xr1+rvGfo/v372LZtGwBgyJAhcHZ2homJCYKCgnDgwAGkpqZi7969aNasGczMzDBnzhyUL18eAwcORMOGDSGRSHD9+nUcOXJEKGFr06YNWrduXSKvtSZ+5oLI/x588sknhX7es2fPhJLT5s2bC6WWrVu3RvXq1fHmzRv4+fkhOjoaFSpUKHJc6pL/uQYMGKDVvh89eiRsN2nSJN+2tra2qFq1KsLCwhAVFVWs10t+tL0wI8/y7R89egSZTFakEevSQNvvoVh9AtlLnuR8yWRqalrgcnbyfycuX77M5JL0UCmZ0Gfnzp345ptvkJycrHTs+++/x5w5c0SIikSRmQlER4sdhfZVqABoacbjpKQkPHjwQHjs5OSk0fO/f/8eP/74I9LT02FpaYl169ahbdu2Cm0+//xzfP311xg/fjwePnyIP/74Az169Mg3ofLz80OlSpXg4eGhdD/Y0KFDMWjQILx58wbXr1/H3bt30bhxY43HVaFCBfTo0QNA9oeSHO3bt8+z7PDOnTtCQlqmTBn88ccf6Natm0KbsWPHYtasWThx4gQSExMxY8YMHDp0KN8PvVevXkXFihXh4eFRqJHOc+fOoWrVqvjrr78UZkTt06cPunfvjjFjxkAmk2HdunWIj49Hw4YNsX37doWkuV+/fnB2dhb+Jnt4eKhMLkvqGijqz5yfrKws3L17F0D2/ZZFSU7lRy3lkzeJRIL+/ftj48aNSE9Px7FjxwosHde0xMREhd9vbd8XLV/mXJhSXHt7e4SFhQHITtqLmiRYW1vDxMQE6enpiIqKQkxMTL6jl/KzvSYmJuL9+/fF+mKiJNy8eVOpKkPe/fv3he2c34W8NGrUqMi3OuTQ9nuojT5v3LiB2NhYANmVE+/evcOVK1eE8nUTExOlLwlVady4sXC95ZTUl1ZMLvVV7pFLAyuLTUpKwpQpU+Dh4aF0zMrKCh4eHvjiiy9EiIxE4e0NTJ4M5LHWokGrVAlYvx5wcyvxriIjI4XJRsqUKaPxD1bbt28XPiAtW7ZMKanIUalSJaxZswZ9+/Z
FZmYmdu3aVWCFwvLly1VONFKhQgVMnDgRc+fOBZBdKpU7uSzJuPKzbds24fWeMmWKUmIJZL8Pv/32G+7fv49nz57h/v37uHTpkkL5lyoLFy4sUpK1bNkylUtttG3bFm3atMG1a9fw+vVrmJiYYO3atSo/aA0aNAibN2/Gq1ev8O+//yIjIwPGxoofM0rytS7qz5yX169fC2vgFTRKLC8jIwNHjx4FAJQtWxa9evVSOD5gwABhYpKDBw9qPbnM/fut7fWf5e9Ds7GxKbC9fCJYnHvYpFIpnJycEBAQAJlMhqNHj+LLL79U2TYjIwPHjx9X2BcXF6czyeUff/wh3D9akHnz5uV7/LfffsPAgQOLFYe230Nt9LlixQrcvn1bab9EIkHr1q0xdepUNG/evMDzlClTBvb29nj+/DlevHiB1NRUlC1btsDnGSJO6KOvDHjk8uHDh2jTpo3KxNLJyQmBgYFMLEubr74qnYklkP1zf/WVVrqS/2bcysqqwPYTJkxQmKwm97+c0lAAwoc7AKhVqxa6d++e77lr166Npk2bAlAs5VOlQYMGeSYpABSOPXnyROFYScaVn7S0NGE9TDMzM4wcOTLPtmXKlMHYsWOFxwUt4F6tWrUCfw55DRs2zLOEFYDCfXldunQRSj3za5uWloZXr14pHCvJ17qoP3N+cpYfAZDveoe5nT9/HpGRkQCyy5tzzwdQs2ZN4UPqo0ePtD4pXVF/vzVNftH6wnzolm+TM0FSUQ0ZMkTYXrNmDYKCgpTaZGZm4pdfflG4vw5AoScBK03EeA/F6BP4/7NEF2XCq5zEViaT4e3bt8XuW99x5FJfGeg9l/v378f48eNV/lGfOHEiVq9eDVNTUxEiIzJ8Rb1HqSiePHmCDx8+AADs7OwUSkfzYmSU/f1naGhovt8CF1S+Kz/6kFP+pI248hMSEoLU1FQA2eWJOUtV5EV+IgtV37LLc3Z2LtL7V9CkTfITWRR0/6F827i4OIVjJflaF/Vnzo98ElaU5FLVRD65DRgwAP/995/QPid51rbSci+hq6srjh07hosXLyIpKQkjR46Eq6srWrduDXNzc4SGhuLYsWN49OgRzMzMUK5cOWFW0JxrTxfs3r073+PaXOfS0Bw4cEDYTkpKwsuXL+Hn5wcPDw+sWbMGO3bswMqVKws1CZf8qGnu/2tKEyaX+srAZotNTU3FDz/8oLCWUQ5zc3Ns3bq1yItYkwHZupVlsVogX3JUmP8Yv/nmG7jlKtdds2YNHj9+rNRWfjQoZwbSooiJicmzRK2gUin5tSZzL+lRknHl573ctayqHDW3ypUrw8zMDElJSQrPVaWo5Y5Fef2K0jYnec5Rkq+1Jks85a8Ri0J+cRseHi7MamlnZycsS5Bb79698euvvyI5ORknT57ErFmzUK5cOfWDLgSxP/iamZkJ/aampiqVTOcmf/2YF/MzjpGREVavXo3Zs2fj9OnTyMjIwLFjx3Ds2DGFduXLl8eqVauE8nlAnNFdXSfGe6jNPs3MzNCwYUM0bNgQ/fr1w/DhwxEREYFvvvkGPj4+BZbdy/eniVmr9RWTS31lQCOXMpkMLi4uKheobdy4Mby9vTVyHw3pMTc3YOBATuhTwmxtbWFkZISsrCykpaUhPDw838RJ1YhXXrOx5h7FKqrcy3TIU2eEoSTjyo98uVZhk4uc5LKgUq+iVncU5fXT1ddakxUt8glyYUsjDx8+jMzMTADAZ599Bmkev7MWFhbo0aMHjh8/joSEBPzzzz8qRznlX+eMjIxCx54TQ+5zANlJr/zv97t377R636WlpaWQJHz48KHAD/7yI8jqLDlmbm6OP/74A/7+/jh06BACAwOFdROrVauGbt26YdSoUbC2thb2SySSApeeKI3EeA/Fum5q1qyJ6dOnY+bMmUhPT8emTZuwZs2afJ8j/7e5NFfZMbnUVwZ0z6VEIsHo0aOVkssxY8Zg/fr1BZaLUSkhlQJ2dmJHYdByvrW
9d+8egOzlHXJPSqLOuXN8+eWXmD17tkbOqy6x4pL/gKRqNmxVcu49Ku4IgNh09RrIragj+DKZDAcPHhQee3h4qJwzQBUfHx+VyaX8h+Ki3DcmnwznHnnL/fsdGBgIV1fXQp9bXbVq1UJoaCiA7DLngko3c9oCimtQFlfr1q3zvbf49u3bwhcYH3/8MdfQVkGM91DM60Z+4rTCVFoUt6Te0OhOQTkVjYHNFjtmzBiMGjUKQPa3+Dn/OTOxJNIu+ftKciZf0QT5ERJVZbNiESuuSpUqCdvyU+3nJTw8XEgu5Z+rT3T1GsitevXqwnZhksuAgAClyYsK68aNGwpLYOSwk/sirSjnLmjRd/nf78OHDxf6vJpQv359YTs4ODjftpGRkcJyEhUrVtTKmqCXLl0Stlu2bFni/ekjMd5DMa8b+bL4wlRe5CSXEolE4e9IacPkUl8Z0MglkP2LuHHjRgwYMAABAQEYM2aM2CERlUrDhw8XygIvXLigsRktGzZsKIyk3Lx5E9E6UuIsVlwNGjQQJqcJDAxUmBFRFfnKjoIm1dFVunoN5GZvby+MDj979qzA9vIT+bi4uGDy5MkF/pOfoEl+1DNHgwYNhN/D58+fC7PQFuTmzZvCtqrrRP73+/Lly8LkQtogn9jKJ3KqXLx4UdguaNkdTUhLS8OhQ4eEx7nvJadsYryHYl438l/8FJSopqWlCaOmtWrVUiivL22YXOorPb3n8tGjR3keMzc3x6FDh5TWoCMi7alcubKwLEZWVhamTZuGd+/eqX1eqVSKfv36Acj+T7ige1e0Ray4ypQpg65duwLILnf19PTMs216erpCmeWnn35a4vGVBF29BnIzMjJCkyZNAABhYWEIDw/Ps218fLywNIxUKsX8+fMxZcqUAv/JlwTL36+Zo0yZMsKH6szMTOzZs6fAuK9duyaMCNvZ2alMLuV/v2UyGWbMmKEw0VJBnj59itWrVxe6vbxWrVoJI7IBAQFCeW5umZmZCrOjaqN0d8OGDcLSEa1atRJtFt/iGjhwIB4+fIiHDx+W6EyxYryHYl43+/btE7adnZ3zbXvv3j2hrLqgGbgNHZNLfaVnZbEZGRmYNWsWGjZsiJMnT4odDhHl44cffhDWKwwNDcWgQYNw6tQpYQH2vLx+/TrfD+ITJkwQZqzcv38/VqxYke8kLSkpKTh48GCJ/80QK67x48cLk66sW7dOWPdSXnp6OubMmYOnT58CABwdHQs1Jb6u0tVrIDf51zi/0fvjx48Ls0K2b99eoZw1P7Vq1RI+gEZERCiMuOT4+uuvhSVDtm7dmu9rEBISgp9++kl4PHbs2DxHTn744Qfhg/Lbt28xZMgQ/PPPPwpLEeUWExOD1atXY9CgQcUuaZZKpZg0aZLw+KeffhKW/ZC3cuVKPHjwAED2B/r8rnd3d3dhXV35kUd5t27dUpolOkdmZia2bNmCTZs2Aci+L/XXX38t9M9U2ojxHmq6Ty8vL1y/fj3f6z3nuti7d6+wb/jw4Xm2B7KvsxwdOnTIt62h44Q+hkKHRy7fvn2LYcOGCeUM7u7uCAoKwkcffSRyZESkiomJCf78809Mnz4dly5dQkREBKZNm4aVK1eibdu2aNiwIcqXLw8TExMkJCTg9evXCAwMxH///SeMwJiamipNaFCpUiWsWbMGEyZMQGpqKrZt24bjx4+jV69ecHBwgLm5OZKTk/HmzRvcvXsX169fR3JyMr777rsS/XnFiqtJkyb49ttvsW7dOqSlpWHixIno0aMHOnXqBAsLC7x+/RpHjhwRSjPNzc2xYsUKvV6jUFevgdxcXFywYsUKAMD169fh4uKisl1h1rbMS//+/YUPpD4+PujWrZvC8WbNmmHy5MlYt24d0tPT8cMPP2D37t3o0qULqlWrBmNjY0RGRuLGjRvw8/MTfvc6dOiA0aNH59lvzu/3tGnT8O+//yIiIgJTp07Fxx9/jI4dO6JOnTqwtrZGcnIy3r9/j5s
3byIgIEBpaZniGDx4MHx9fXH16lU8fvwY/fv3h5ubG+rWrYuYmBicPHlSKNW1tLTEwoUL1e7zzz//RFBQEDp16oQmTZrAzs4OaWlpePHiBU6fPi3c82xqaoo//vgDNWvWzPd8r1+/VnjfAeDhw4fC9v3795VGdx0dHdGzZ89i/ww3b95UmCxGHY0aNUK1atWK/Xwx3kNN9nn79m0sWLAAVatWRbt27VC/fn1UrFgRJiYmiI+Px6NHj+Dn56cwoj9hwgS0atUq3xhv3LgBAChbtqxC2XtpxOTSUOhocunr6yusE5Tjw4cPGDJkCC5fvgwTExMRoyOivFhZWWHz5s3YvXs3tm7dioiICLx580bpQ1VuZmZm6NOnD6ZMmaJyGZO2bdti3759mD59Op49e4bw8PA8ly8Bsr+1LuxokDrEimvy5MkwMTEREoizZ8/i7NmzSu2qV6+ODRs2oE6dOmr3KTZdvQbkffTRR3B2dkZgYCBOnTqFmTNnKv1/FRISIpToWVpaokePHkXqo0+fPvjtt9+QlpaGixcvIjIyUmkSnsmTJ8Pa2horV65EcnIygoKCEBQUpPJ8RkZGcHNzw88//1zgkjHW1tbYtm0btm/fju3btyMmJgYvXrxQOblQDqlUij59+qiV6BsbG2Pt2rWYMWMGzp8/j4iICJXrW1epUgWrV69GvXr1it2XvNjYWBw/fhzHjx9Xebxu3bpYtGhRgaWPQPYX5jkjnarklKfKGzBggFrJ5R9//FHkdWHz8ttvv2HgwIHFfr4Y72FJ9BkWFqbyfmd5lpaW+OGHHwoctUxMTMSVK1cAAN27dy/1Mw0zuTQEEgmgpUWYCyszMxOLFi3CwoULlUoPTExMMHz48AIXwiUicRkZGeHLL7/EsGHD4Ovri+vXr+POnTuIjIxEbGwsjIyMYGlpiSpVqsDR0RHNmzdH9+7dC1wqo1GjRjh58iTOnj0LPz8/3L59G5GRkUhOToaZmRmqVq2K+vXro3Xr1ujatavWEgux4powYQJ69+4NLy8v/Pvvv3j79i2Sk5NhbW0NBwcHdO/eHYMGDTKoCSJ09RqQN2LECAQGBiI6OhqXLl1C9+7dFY7Lf9HSq1cvYYKmwipfvjy6du2Kf/75BxkZGTh8+DC++uorpXYjR45Enz59cPDgQVy/fh2PHz9GbGwsMjIyUL58edSoUQMtWrTAF198gVq1ahW6f6lUiq+//hojRozA2bNnce3aNdy9exdRUVGIj4+HqakpKlSogAYNGqBFixbo06ePRtZ+tLCwwKZNm+Dr64ujR48iODgYUVFRMDc3R82aNeHi4oKhQ4dq7AP6d999hyZNmuDGjRsIDQ1FVFQUJBIJKlasCEdHR7i4uKBXr178TFIE2n4PNdnn3Llz0adPH9y4cQO3bt3C+/fvER0djcTERJQrVw4VK1aEg4MDOnbsiF69ehXqZzh16pQwsl9QIloaSGT5FR2TTurevTvw7Bn8cr5htLAA4uNFjUleeHg4RowYAT8/P6VjNWvWxIEDB/Jda4pKh5SUFDx//hy1atUq1YsNE5FuysjIQM+ePfHmzRu4uLhg/fr1YodERDpo0KBBCA4ORtOmTeHt7V3s86jzuSjnyy9Vn721jRP6GAIdmszn0qVLcHJyUnlx9+3bF0FBQUwsiYhI5xkbG2PKlCkAsm/xyF3qSER0/fp1Yf3N77//XtxgdASTS0OgA/dbZmVlYenSpejatauwgG0OqVSKZcuW4ejRo1pZCJmIiEgT+vfvjwYNGkAmk2HdunVih0NEOibn3s8OHTqgffv2IkejG5hcGgKRk8uoqCh89tlnmDVrltJSBdWqVcOFCxfw448/FjjBABERkS4xMjLC/PnzIZFIcPbs2TzX2COi0ufixYvw9/eHiYkJfv75Z7HD0Rm8e9kQiFgW6+/vDzc3N7x+/VrpWM+ePeHp6SnKRAxERESa4OzsjJCQELHDICId07lzZ5bLq8ChJEMg4shlUlK
SwlpAQPY3vQsXLsSpU6eYWBIRERERlRJMLg2BiMll165d8csvvwiPK1eujLNnz2Lu3LmQSqWixUVERERERNrFslhDIPJssbNnz8aVK1eQmpqKvXv3omrVqqLGQ0RERERE2sfk0hBoYeQyZzlUiUSidMzIyAgHDhyAmZkZFyEmIiIiIiqlWBZrCEo4uYyPj8ewYcPyXUDaysqKiSURERERUSnG5NIQlGBZ7J07d9CiRQvs378f06dPR0BAQIn1RURERERE+ovJpSEogZFLmUwGDw8PtG7dGo8ePQIApKenY/DgwYiOjtZ4f0REREREpN+YXBoCDSeXiYmJGDNmDMaNG4eUlBSFY7GxsVzvizQq535eIiIiotLKUD4P8SY5Q6DBstgHDx5g0KBBuH//vtKxli1b4sCBA/j444811h+VXkZG2d9tZWVliRwJERERkbgyMzMB/P/PR/pKv6OnbBoaufT09ESLFi1UJpZTpkzB5cuXmViSxpiYmEAqlSIxMVHsUIiIiIhElZSUBKlUChMTE7FDUQuTS0OgZnKZnJyMr7/+Gu7u7khKSlI4ZmlpiQMHDmDt2rUoW7asWv0QyZNIJLC0tERcXJzBlIIQERERFZVMJkNcXBwsLS1VLvunT1gWawjUKIt9/Pgx3NzccPv2baVjzZo1g7e3N+rWratOdER5Kl++PGJiYvD27VtUq1ZN7/+gEhERERWFTCbD27dvkZ6ejvLly4sdjtqYXBqCYo5cent7Y9y4cYiPj1c69vXXX2PNmjUoV66cutER5cnMzAz29vYIDQ1FcnIyrKysYGZmBqlUykSTiIiIDJJMJkNmZiaSkpIQFxeH9PR02Nvbw8zMTOzQ1Mbk0hAUY+QyIiICY8eORUJCQq5TmWPz5s0YMWKEpqIjypelpSU++ugjxMbGIiYmBlFRUWKHRERERFTipFIpLC0tUb58eYNILAEml4ahGCOXdnZ22LZtG4YOHSrsc3R0hLe3Nxo2bKjJ6IgKZGZmBjMzM1SpUgXp6emcQZaIiIgMmpGREUxMTAyuUovJpSEoZlnskCFDcOnSJWzcuBFffvklNmzYAHMNLmtCVFQSiQRlypQROwwiIiIiKgYml/pOIgHUuC9y1apV6NatG7744gsNBkVERERERKUNlyLRd+bm2QlmHkJDQ7Fu3bo8j5ctW5aJJRERERERqc2gRy59fX1x7NgxBAcHIzIyEhYWFqhZsyZ69OiBoUOHwtLSUuN9+vv74+DBg/jvv/8QEREBU1NTVK9eHd26dcOQIUNQqVIlzXaYT0ns6dOnMXLkSERFRaFKlSpwc3PTbN9ERERERET/RyIzwNXLExISMGPGDJw/fz7PNlWqVMHq1avh7OyskT4zMjIwf/58+Pj45NmmfPnyWLJkCXr06KFWX927dweePYPfixdAnTrAkydKsSxYsAC//vqrsM/S0hL//fcf6tWrp1bfRERERESkO7p37w4A8PPzEzkSAxy5zMjIwNSpU3H16lUAgK2tLdzc3FC3bl3ExsbixIkTCAwMxLt37zBhwgTs3btXIwnX3LlzcejQIQDZidygQYPQqFEjJCcn49y5c7hw4QJiY2Px/fffY9u2bWjTpo3afQJQGrkMCwvD8OHDceHCBYX98fHx+PHHH3H48GHN9EtERERERCTH4JLLAwcOCIll3bp1sXPnTtja2grHR4wYgWXLlsHDwwNxcXGYN28evLy81Orz4sWLQmJpZ2cHT09PfPzxx8LxIUOGYPfu3Vi8eDHS09MxZ84c/P3335qZFVMuuTx37hyGDx+O8PBwpWaff/45/vrrL/X7IyIiIiIiUsGgJvTJzMzExo0bhcfLly9XSCxzzJgxQ1jLMTAwEJcvX1ar37Vr1wrb8+bNU0gsc7i7u6Nr164AsifZyUlG1WZujqysLCxatAguLi5KiaWxsTFWr16NQ4cOwdraWjN9EhERERER5WJQyWVAQAAiIiIAAK1atYKjo6PKdlKpFO7
u7sLjkydPFrvP169f4+7duwAAe3t7uLi45Nl29OjRGulTXoSJCXr37o158+YpLTxfo0YNXL58Gd9//73BLdBKRERERES6xaDKYuVHIDt16pRv286dOwvbly5dKnaf8s/t2LFjvklcixYtYGZmhqSkJNy8eROJiYkwNzcvdt+XAQw9dw5vk5OVjrm6umLXrl2oWLFisc9PRERERERUWAY1cvno0SNhu0mTJvm2tbW1RdWqVQEAUVFRiI6OLvE+jY2N0ahRIwBAVlYWnj59Wqw+ASAWQFdAKbGUSqVYunQpjh8/zsSSiIiIiIi0xqCSy+fPnwvb9vb2BbaXb/Ps2TOt9yn/3KKKApCZa1/VqlVx7tw5/PTTTzAyMqi3loiIiIiIdJxBlcXGx8cL2zY2NgW2l5/gRv65ut7n+/fvYWJiojBxULly5VCpUiX88ssvxTonERERERHpn7CwMEilUrHDAGBgyWVSUpKwXbZs2QLby7dJTEzUSp+mpqZq91m2bFlIJBLUqFGjWM8nIiIiIiLDYGxsrJklDjXAoJJLsWlrRtabN29qpR8iIiIiIqLCMqgb88zMzITt1NTUAtvLtynurK3yfaakpBTYXr6NOjPFEhERERER6RKDSi4tLS2F7Q8fPhTYPiYmRuVzdb1PIiIiIiIiXWNQyWWtWrWE7dDQ0ALby7epXbu21vuUfy4REREREZE+M6jksn79+sJ2cHBwvm0jIyMRFhYGAKhYsSIqVKhQ4n1mZGTg/v37AAAjIyPUrVu3WH0SERERERHpGoNKLjt27ChsX7p0Kd+2Fy9eFLY7d+5c7D47deokbF++fBkymSzPtjdv3hRml23RooXC/ZpERERERET6zKCSy1atWsHOzg4AEBAQgHv37qlsl5mZid27dwuPXV1di91njRo10KRJEwDZJa9nz57Ns+2OHTuE7T59+hS7TyIiIiIiIl1jUMmlVCrFpEmThMc//fQToqKilNqtXLkSDx48AAA4OzsrjHjK8/f3h4ODAxwcHNCtW7c8+50yZYqwvXDhQrx8+VKpjaenJ86fPw8AsLe3x8CBAwv3QxEREREREekBg1vncvDgwfD19cXVq1fx+PFj9O/fH25ubqhbty5iYmJw8uRJ/PfffwCyZ2tduHCh2n127twZAwcOxKFDhxAREYEvvvgCbm5uaNSoEZKTk3Hu3DkhsTQxMcGvv/6qMwudEhERERERaYJElt9NgnoqISEBM2bMEBI6VapUqYLVq1fD2dk5zzb+/v4YNWoUAKB69eo4d+5cnm0zMjIwb948HDx4MM825cuXx5IlS9CjR49C/BRERERERET6wyCTyxy+vr44evQogoODERUVBXNzc9SsWRMuLi4YOnRogetMFiW5lH+Oj48PAgMDERERgbJly6J69ero1q0bhg4dikqVKmnkZyMiIiIiItIlBp1cEhERERERkXYY1IQ+REREREREJA4ml0RERERERKQ2g5stVtf5+vri2LFjCA4ORmRkJCwsLFCzZk306NGjUPeBFoe/vz8OHjyI//77DxERETA1NRXuAx0yZAjvAzVQ2rrWUlNTcf36dVy/fh3BwcF4/vw5YmNjYWJiggoVKqBx48bo1asXevToARMTE430SbpFjL9rua1duxYbNmwQHg8YMABLly4t8X5Ju8S61l68eIFjx47h8uXLePv2LWJjY1G+fHlUrFgRTZo0QatWrdCzZ0+UK1euRPon7dP2tRYVFYWDBw/iypUrePLkCeLi4iCVSmFjY4P69euja9eu6NevH8zNzTXaL4kjMzMTT58+xd27d3Hv3j3cvXsXISEhSElJAVCy/4eVdF7Aey61RFMz2BZFRkYG5s+fDx8fnzzbcAZbw6PNa+3kyZOYO3cuEhMTC2xbv359rFy5Eg4ODmr1SbpDjL9rqoSEhGDQoEFIT08X9jG5NCxiXWtpaWlYvXo1du/erXB9qXLkyBE0bNhQY32TOMS41o4fP45ffvkF8fH
x+barVKkSVq5cidatW2ukXxLPlClTcObMmTyPl8T/YdrKC5hcakFGRga+/vprXL16FQBga2srrL0ZGxuLEydOIDAwEABgZWWFvXv3ol69emr3O2vWLBw6dAhA9pqegwYNUlh788KFCwCy197ctm0b2rRpo3afJC5tX2tbtmzB77//DgCwsbFBu3bt0LRpU9jZ2SEjIwMPHjzAkSNH8OHDBwCAtbU1vLy8ULt2bTV/UhKbWH/XcsvMzISbmxvu3bsHMzMzJCUlAWByaUjEutZSU1MxZcoUXLx4EQBgYWGBnj17omnTpihfvjxiYmIQHh6OwMBABAYGwsfHh8mlnhPjWjt37hwmTZqEnI/j9evXh6urK6pVq4a0tDQ8f/4chw4dEv4fLVu2LHx8fFC/fn21+iVxTZo0CX5+fsJja2trWFtb48WLFwBK5v8wreUFMipxe/bskdWvX19Wv359maurqywiIkKpzdKlS4U2Q4cOVbvPCxcuCOdr37697Pnz50ptdu3aJbTp1q2bLDU1Ve1+SVzavtY2b94sc3Nzk/3zzz+ytLQ0lW2io6NlQ4cOFfp0d3dXq0/SDWL8XVNl06ZNsvr168ucnJxk69evF/r76aefSqQ/0j6xrrW5c+cK55wwYYIsKioqz7YfPnyQJSYmaqRfEo8Y15qLi4twvtWrV8uysrKU2iQmJspGjRoltPv222/V7pfE9eeff8pWrlwp+/vvv2WvXr2SyWQy2cGDB0vs/zBt5gWc0KeEZWZmYuPGjcLj5cuXw9bWVqndjBkzhG88AwMDcfnyZbX6Xbt2rbA9b948fPzxx0pt3N3d0bVrVwBAaGio8G0G6ScxrrXBgwfjwIED6NmzZ573U9rY2GDt2rUwNTUFkF3rHxoaWuw+SXxi/V3L7enTp1i/fj0A4IcffkDVqlU1en4Sn1jX2vXr17F//34AgJOTE9avX48KFSrk2d7a2hpmZmZq9UniEuNae/nyJV6+fAkge5R0ypQpkEgkSu3MzMwwZ84c4fGNGzeK3SfphokTJ2L69Ono1asXatSoUeL9aTMvYHJZwgICAhAREQEAaNWqFRwdHVW2k0qlcHd3Fx6fPHmy2H2+fv0ad+/eBQDY29vDxcUlz7ajR4/WSJ8kPjGuNWtr60K1s7OzQ8uWLYXHDx8+LHafJD4xrrXcsrKyMGfOHKSlpaFZs2YYPny4xs5NukOsa23r1q3C9s8//wxjY85/aOjEuNaioqKE7Ro1akAqlebZVj4ZyCn/JyoMbecFTC5LmPw3Wp06dcq3befOnYXtS5cuFbtP+ed27NhR5bdgOVq0aCF823rz5s1CTcxCukmMa60o5Ge4S01N1UqfVDJ04VrbtWsXgoKCYGJigsWLF8PIiP+dGSIxrrWwsDDhnrtGjRqhcePGxT4X6Q8xrjX5kdHQ0FBkZWXl2TZnhBNAidy/ToZL23kB/zcuYY8ePRK2mzRpkm9bW1tboawrKioK0dHRJd6nsbExGjVqBCB7JODp06fF6pPEJ8a1VhTy8VWrVq3E+6OSI/a19urVK6xZswYA8PXXX/ODlgET41q7efOmMLlKzoQW58+fxzfffIMOHTqgcePGaN++PcaOHYs9e/YgLS2tWP2QbhHjWqtZs6YwMU9ERIRCWa68lJQU/Pbbb8LjcePGFas/Kp20nRcwuSxhz58/F7bt7e0LbC/f5tmzZ1rvU/65pF/EuNYKy9/fX+ijQoUKBf5xI90m5rUmk8kwZ84cJCcno06dOpg4caJa5yPdJsa1llM+BmSXIv7www+YOHEizp07h4iICKSnpyMyMhJXr17FwoUL4erqylJ/AyDW37VFixYJlT3r1q1D//79sWnTJhw7dgw+Pj5YuXIlunXrhqtXr8LY2BizZs1Cnz59it0flT7azgt4E0EJk1+zyMbGpsD28vewFbTekS71SeLT1fc9JSUFCxYsEB5PmDAh3/tKSPeJea3t3bsXAQEBkEgkWLRoEcqUKaP
W+Ui3iXGt5dx3BwAeHh548eIFjIyM0Lt3b7Rt2xZmZmZ49uwZfHx88O7dO7x+/RqjRo3C4cOHWZWhx8T6u9asWTMcOHAAc+fORWBgIEJCQhASEqLQRiKRYPjw4XB3d+dSXlRk2r62OXJZwuRvui5btmyB7eXbFPf+x6L2mTOLpzp9kvjEuNYKY9asWcK3uk2aNMGIESNKrC/SDrGutbdv3wrrqg4bNgzNmzcv9rlIP4hxrcXGxgrbL168QJkyZbB9+3asWrUKbm5u6NOnD6ZMmYJTp06hVatWAICYmBj88ssvxeqPdIOY/4fWrVsXs2fPRocOHVQel8lkOHz4MLZu3Sqsd0lUWNrOC5hcGrj8btolKmmrVq3CqVOnAGR/E7Z69eo8lywhKsjcuXORmJiIypUrY/r06WKHQwYq537LHBMnTkS7du2U2pmbm2PVqlUoV64cAODChQvCAuhEhZWcnIzp06dj0KBBuHHjhvDFRXBwMAIDA7Fv3z70798fycnJOHToEIYMGYLXr1+LHTbpKW3kBUwuS5j8uleFmSFTvo387JrF7TMlJaXA9vJtitsniU+May0/f/75JzZv3gwAsLKygoeHh1bWcqKSJ8a15uPjgytXrgAA5s+fDwsLi2Kdh/SLGNda7ucNHTo0z7Z2dnbo3r278PjatWvF6pPEJ8a1lpWVha+//honTpyAiYkJduzYgcmTJ6NOnTooU6YMzM3N4eTkhOXLl+PHH38EkD1r7P/+979i9Uelk7bzAiaXJczS0lLYLkwpQ0xMjMrn6nqfJD5det+3bNkizOZpaWmJ7du357lmGOkfbV9r4eHhWLZsGQCgV69eCh/mybCJ8XfNyspK2K5atSoqVqyYb3v5v22vXr0qVp8kPjGutTNnziAgIAAAMHDgQDg7O+fZduzYsahVqxYAICgoCHfu3ClWn1T6aPva5oQ+JaxWrVoIDQ0FkL2GUUGzNOW0BVDsm7Zr1aoFf39/4XytW7cudJ85f7hI/4hxramyZcsW4b44CwsLbN++HU2bNtXY+Ul82r7WTp8+jbi4OADZSwDkNV3/gwcPhO2HDx8K7ezs7ODm5lbkfkl8Yvxdk39eYUbI5T98JSQkFKtPEp8Y19r58+eF7fbt2+fbViKRoG3btsLsnbdv3+b/rVQo2s4LmFyWsPr16wsL8wYHBwtrZqkSGRmJsLAwAEDFihVRoUKFYveZIzg4GF988UWebTMyMnD//n0AgJGREerWrVusPkl8Ylxrucknlubm5ti2bRs++eQTjZybdIe2rzX5e+A8PT0L9Zz79+8Lf9saNGjA5FJPifF3rUGDBsJ2YWZKzPniAyhcMkq6SYxr7f3798J2Ya4d+VF1+UlaiPKj7byAZbElrGPHjsL2pUuX8m178eJFYbtz587F7rNTp07C9uXLl5UmJ5B38+ZN4Q9UixYtFOqySb+Ica3Jk08szczMsG3bNjg5OWnk3KRbxL7WqPQQ41pr0aKFcJ/Ru3fvEBkZmW/7e/fuCdtcJkJ/iXGtySeUb9++LbD9mzdvhG355SKI8qPtvIDJZQlr1aoV7OzsAAABAQEK/wnJy8zMxO7du4XHrq6uxe6zRo0awiL1oaGhOHv2bJ5td+zYIWxzUV79Jsa1lmPr1q1KiWV+946QftP2tTZ69Gg8fPiwwH+//fab8JwBAwYI+48ePVqsfkl8YvxdK1u2LD799FPh8b59+/JsGxERAT8/PwDZ3/IXVNpIukuMa01+ROnEiRP5to2Li1NIalkSS4Wl7byAyWUJk0qlmDRpkvD4p59+QlRUlFK7lStXCvcLOTs7K3yDJs/f3x8ODg5wcHBAt27d8ux3ypQpwvbChQvx8uVLpTaenp5Cvb+9vT0GDhxYuB+KdJJY19r27duxcuVKANmJ5datW7n+oIET61qj0kesa+3bb79FmTJlAACbN29WOQtsYmIifvjhB2FmxT59+qBatWqF/+FIp4hxrbm6usLIKPuj+PX
r17F27VqVo0oJCQmYNm2aUILt4OCgUL5NpZcu5gW851ILBg8eDF9fX1y9ehWPHz9G//794ebmhrp16yImJgYnT57Ef//9ByB7YoCFCxeq3Wfnzp0xcOBAHDp0CBEREfjiiy/g5uaGRo0aITk5GefOnRMuIBMTE/z666/Cf6Skv7R9rXl7e2P58uXC46FDhyImJga+vr75Pq9WrVqoU6eOWn2TuMT4u0alkxjXmr29PWbPno0FCxYgLS0NY8eOhaurK9q1a4dy5crh2bNn8Pb2xrt37wAA1atXx+zZs9Xul8Sl7WutTp06GD16NDw8PAAAGzZswPnz59G7d2/Y29sjIyNDqL6IiIgAAJQpUwbz58/nOuZ67vXr1/Dx8VHY9/DhQ2H7/v37WL16tcJxR0dH9OzZs1j9aTMvYHKpBcbGxli7di1mzJiB8+fPIyIiQuVsh1WqVMHq1atRr149jfS7aNEiSCQSHDx4EPHx8cIfL3nly5fHkiVL8r1xnfSHtq+1nP9kc3h4eKi8znKbPHmywrdopH/E+rtGpY9Y19qwYcOQlZWF5cuXIyUlBSdOnFBZuujo6IgNGzZobGI0Eo8Y19qPP/6IMmXKYOvWrcjMzFSYjCw3Ozs7LFu2jNVBBuDt27fYtGlTnsdzbuuQN2DAgGInl4D28gIml1piYWGBTZs2wdfXF0ePHkVwcDCioqJgbm6OmjVrwsXFBUOHDtXoeoPGxsZYsmQJ+vfvDx8fHwQGBiIiIgJly5ZF9erV0a1bNwwdOhSVKlXSWJ8kPjGuNSqdeK2Rtoh1rY0YMQKdOnXC/v37cenSJYSFhSE5ORk2NjZo2rQpXF1d0bt3b6G0kfSftq81iUSCadOmYeDAgTh48CBu3LiB58+fIyEhAUZGRrCxsUGDBg3QqVMn9O/fnzMSU7FpKy+QyPKbMoiIiIiIiIioEPhVGxEREREREamNySURERERERGpjcklERERERERqY3JJREREREREamNySURERERERGpjcklERERERERqY3JJREREREREamNySURERERERGpjcklERERERERqY3JJREREREREamNySURUSlz6NAhODg4wMHBATNnzhQ7HFKDu7u78F76+/tr7Lzr1q0Tzrtu3TqNnZeIiAybsdgBEBEZOnd3dwQEBBT5eUeOHEHDhg1LIKLSYd26dVi/fn2ex8uWLQsrKyvUrl0bzZs3x8CBA1GjRg0tRki6wt/fH6NGjcrzuLGxMSwsLFC5cmU4OjqiV69e6NixI4yM+B09EZE8/lUkIqJSKTU1FREREfD398fGjRvRs2dPLFu2DGlpaWKHplEchVRfRkYGYmJi8PDhQxw6dAhff/01BgwYgMePH2s9Fr6fRKTLOHJJRKRFTZo0QdOmTQvVtkKFCiUcTelRqVIluLi4KOxLTk7G06dPcefOHchkMmRlZcHDwwPv37/HypUrIZFIRIqWxDZixAiFx+np6QgPD0dgYCDi4+MBACEhIRg1ahT279+PmjVrihEmEZHOYXJJRKRFnTt3xpQpU8QOo9T5+OOPMW/ePJXHHj16hB9++EEYhTpx4gR69OiB3r17azPEYtm9e3eJnHfKlCml+jrN61pJSkrC6tWrsWvXLgBAdHQ0lixZgk2bNmkzPCIincWyWCIiKtXq16+P7du3w9LSUti3c+dOESMiXWVmZoY5c+agR48ewr7z588jPDxcxKiIiHQHk0siIir1KleujIEDBwqP79y5g4SEBBEjIl325ZdfKjy+efOmSJEQEekWlsUSEemBqKgoXLhwAQEBAXj48CHevn2LxMRElCtXDra2tnBycoKrqys6duyo0X7v3LmDI0eOICgoCKGhoUhKSoK5uTlsbGxQoUIFNG3aFB06dEDLli1hamqa77mSkpJw5MgRXLp0CQ8fPkR0dDSMjIxgZ2eH5s2bo1+/fmjbtq1G4y+KZs2aCSOWmZmZePv2LerXr6/U7u3bt/D29sbVq1cRGhq
KuLg4WFlZwd7eHh06dICbmxuqVq1aqD7VfX3lZyLetWsXWrdurfJYjvXr16ucQXfAgAFYunSp8Fh+pt3JkycrlMjevXsXX3zxBQDA0tISV69eRdmyZQv8WZOTk9GuXTskJSUBNdjTbAAAE35JREFUAI4dOwYHBweVbWUyGXx9feHr64tbt24hMjISaWlpqFChAhwdHeHi4oLPPvsMxsbifIxp1KiRwuP379/n2/7Nmze4ePEibt68iUePHiEsLAwpKSmwsLBApUqV4OzsjAEDBqBZs2Z5nkOd91Oerr+2RKTf+JeDiEjH7dq1C0uXLkVmZqbSsfj4eMTHx+P58+c4dOgQ2rRpgzVr1sDGxkatPjMyMrBw4ULs379f6VhsbCxiY2Px4sULBAYGYseOHZg4cSKmTZuW5/n+/vtv/Prrr4iIiFA69vLlS7x8+RKHDh1C165dsWLFCoUSVW0pX768wmNVI5ebNm3Cxo0bkZqaqrA/KioKUVFRuH37NrZt24bJkyfj66+/zrMvTb++2tS4cWPUqVMHT58+RXx8PM6fP49evXoV+DxfX18hscyZ7VSVkJAQzJw5Ew8ePFA69u7dO7x79w5+fn7YvHkz1q9fj7p166r3AxVD7mQ69/Ugb9myZfjrr78gk8mUjsXExCAmJgaPHj3Cvn370LdvXyxevBjlypXTeMyAfry2RKTfmFwSEem49+/fC4lljRo1UKdOHVSoUAFlypRBfHw8Hj16JExGc/36dYwZMwYHDhxAmTJlit3nsmXLFBKfypUro2nTprCxsYFMJkNMTAyePHmC58+fF3iuHTt2YOnSpcKHa3Nzczg5OaFKlSrIysrCkydPEBwcDJlMhvPnz2PkyJHYt29fiX3AzktsbKzC49wJ7sKFC7Fnzx7hsZmZGVq3bg07OzthSZOkpCSkpqbi999/R2RkJGbPnq2yL02+vnnp0aMH6tWrhzt37iA4OBhA3rMVf/LJJ0U692effYY1a9YAAI4fP16o5PL48ePCdr9+/VS2uXHjBiZOnCgk9sbGxmjcuDFq1aoFY2NjvHnzBv/99x9SU1Px/PlzDB06FPv370edOnWKFL+6co9UVqxYMc+27969g0wmg0QiQa1atVCrVi1YW1vD2NgYMTExePDgAV69egUgezKp+Ph4bN68WWm2YnXfT315bYlIvzG5JCLScR9//DHmzp0LFxcXVK5cWWWbkJAQzJkzB3fv3sWDBw+wbds2TJo0qVj9RUdHY+/evQAAqVSKX3/9FZ9//rnKpTnev3+Pf/75J8+S2GvXrmHZsmWQyWQwMTHBt99+iy+//BJmZmYK7R48eIAZM2bgyZMnCAkJwbJly7BgwYJixV9cgYGBwrZUKlUobT116pRCYvn5559j7ty5sLCwEPYlJCTgl19+wbFjxwBkTwrUvHlzfPrppwr9aPL1zU/OfYHr1q0TkhFNzVbcr18//PHHH5DJZLh48SJiY2OVRn7lRUdH4+rVqwAAIyMjfPbZZ0ptIiIi8P333wvJT9++ffHjjz8qXfORkZFYsGABzp49i/j4eHz//fc4cuQIpFKp2j9XYV2+fFnhcX7JuaOjIzp27IguXbrkubzQzZs3MXv2bLx8+RIXL17EsWPH0L9/f4U26ryf+vTaEpF+Y3JJRKRFFy9exIcPHwpsN2HCBOGD36BBgwps36BBA+zYsQO9e/dGREQE9u7diwkTJhTrQ+Ht27eRkZEBAHB1dcWAAQPybFupUiW4u7urPJaVlYUFCxYgKysLALBkyZI8R6waNmyIHTt2oH///oiKioKPjw8mTpyIKlWqFDn+4ggPD8eRI0eEx02aNBESx6ysLPz+++/CsU8//RRLly5VSgYtLCywfPlyJCYmws/PDwCwcuVKuLi4wMjo/8+fp6nXV0zVq1eHs7Mz/vvvP6Snp+P06dMYMmRInu1PnTol/MytWrVS+SXJ6tWrERkZCQBwc3PD4sWLVZ7L1tYWf/zxB8aMGQN
/f388evQI//zzD1xdXTXwkxXs/fv32LBhg/DYyclJ5b25OcaPH1/gOVu0aAEPDw+4uroiNTUVnp6eSsmlOvTltSUi/cfZYomItCg4OBh79uwp8F90dHSRz21paSkskRAREYEnT54UK0b5ew3zGmkpjHPnzuHFixcAgLZt2+aZWOaws7PD6NGjAWQvWv/3338Xu++iePz4McaNG4f4+Hhhn/xsoFeuXEFoaCgAwMTEBD///LPKUUYAkEgkmD9/PkxMTAAAr169Ekbscmjq9RWb/PspX/KqSs5obu7n5YiOjhbOYWlpiVmzZuV7PqlUih9++EHl+UtCeno6QkNDsXfvXnzxxRdCWaydnR2WLFmikT7s7e2FCZmCg4M1Nluxrr+2RGRYOHJJRKRHoqKicOvWLTx9+hRxcXFITk5WmCjk7t27wvaDBw/ynDQlP/LloGfOnMFXX30FOzu7Ip/n0qVLwnafPn0K9Zw2bdoI2//99x/GjBlT5H5VefHiBRYuXKiwLzk5Gc+ePcOdO3eE0VUA6N27N3r37i08vn79urDdqVMnVKpUKd++KleujA4dOuD8+fMAAH9/f4VZfDX1+oqtd+/eWLx4MdLT03Hz5k28ffsW1apVU2r36tUr3L59G0D2RDi5y4QB4N9//0VaWhoAoEuXLjA3Ny+w/08++QTlypVDcnKyQkmzJhTm96Zt27aYN28eateuXejzvn37Fnfu3MGLFy8QFxeH1NRUhd/fnC8xZDIZQkJC0KJFi6IHn4uuvbZEZNiYXBIRaVHuZR0K68mTJ1i5ciUuXbqkctZYVWJiYorcD5D9wbJ69ep48+YNwsLC0LdvX3z++efo2rUrmjVrVuj7/4KCgoTtCxcuqJyhMjf50cOwsLCiB5+H9+/fK9wzqYpEIsGoUaMwY8YMhZFJ+bidnZ0L1Z+zs7OQXN6/f1/hmKZeX7GVL18enTt3hq+vL2QyGU6cOKFyhlz5ka9u3bop3Kea49atW8K2qi8C8pLzPsXGxiIpKUnpXt6S4uTkhGXLluV5D3RuQUFB+P3333Hz5k2Vs8aqUpjy+cLQt9eWiPQbk0siIh13+fJlTJo0SRh9KKziltWZmJhg+fLlmDBhAhISEhATE4MdO3Zgx44dMDExgaOjI1q0aIEuXbqgRYsWeZaIys+o6evrW+Q44uLiihV/YZUtWxZWVlaoVasWmjdvji+++AI1atRQaidfoqxqZE4Ve3t7YTt3kqCp11cX9OvXT3hvjx8/rjK5LMwssfLXSnBwsDBhTVHExcVpLAEaMWKEsJ2VlYWIiAg8fvwYL1++BJCdLA4ZMgS7d+9Wec3I8/Hxwc8//1zopDJHYmJi0QNXQddeWyIybEwuiYh0WHR0NKZNmyYklvb29hg6dCiaN28Oe3t7WFlZoWzZskICsm7dOmFR9aJ+mJXXokULHD16FBs3bsTff/8trE+Ynp6OW7du4datW9i2bRs+/vhjzJgxAy4uLkrnUPeescKO0BZGq1atsHv37mI9N+dnB1DoD9jyy6ioShI08frqgq5du8LKygpxcXF49OgRQkJC0KBBA+F4TgkoAFhbWyuUB8uTH7EurpwJgzRh3rx5SvtkMhn8/Pwwe/ZsxMbGIiwsDJMnT4a3t3eey/48ffoU8+fPF34XHRwc4ObmhmbNmqF69eowNzdXWDNz5syZOHz4MAAolGqrQ9deWyIybEwuiYh02IEDB4QPh40aNYKnp2e+90xparQDyE5klyxZgnnz5iEoKAg3btzAzZs3cfv2baSkpADILrObPHkyZs6cqXR/ZLly5YTYjx49qpB06BP5hFI+0cxPcnKysJ3X+6Xu66sLypQpg08//RTe3t4Askcp5d9n+ZJYV1dXYaKj3OST8dmzZytMqKQrJBIJevTogfLly2PUqFHIyspCSEgItmzZgsmTJ6t8zo4dO4TErHPnztiwYUOerwGg2d/fHPrw2hKR4eBssUREOuzatWvC9jfffFPgZBx
v377VeAympqZo27Ytpk6dil27dsHf3x/r1q1TSCJ+//13hIeHKzxPfmH5nNErfSQ/o2th7wN98+aNsG1jY5Nv2+K+vrpCvtT1xIkTwihdZmamwoy/+c0WbGtrK2znlJ7qqpYtWyqUzW7fvl1Y5iM3+d/f7777Lt/EElC8bjRFn15bItJ/TC6JiHSY/P1SdevWzbdtZmamVmZ2NDU1Rc+ePbFr1y5hltP09PR8F5a/cuVKicdVUho2bChsy09SlB/596FRo0ZF6q+wr29hlfQ9my1bthTuRX337h0CAgIAAFevXhWSrpo1a8LJySnPczRt2lTY1odr5dtvvxUmJkpKSsLmzZtVtpP//a1Xr16+54yPj8ejR48K7Luo76e+vbZEpN+YXBIR6TAjo///ZzqnVDIvvr6+iIiIKOmQBOXLl1dIGKKiohSOd+nSRdg+ceKE0nF9Ib88ysWLFwv8OSIiIhQSQfnnF0VBr29hyd8PWBL3zkkkEvTt21d4nDOBj3xJ7GeffZbvOTp27Ahj4+w7dV6+fCnMtKurbGxsMHLkSOHxgQMHVI5eyv/+ypdKq+Lt7Y309PQC+y7q+6lvry0R6Tcml0REOkx+Jko/P78820VHR+O3337TSJ9FWQJBvgxXvnwUAD799FN89NFHALI/WP/vf/8r1IdnIPves8Le31jSOnToIMz+mpaWhiVLluTZViaTCWs/Atkjdu3atVNoo6nXt7Dky3JLqrS2f//+wvY///yDmJgYhes1v5JYIHttUPk28+fPL3SsWVlZCjP6asuYMWOEMvWUlBR4eHgotSns7++LFy+EibgKUtT3Ux9fWyLSX0wuiYh0mPzo35YtW3D06FGlNvfu3cPIkSMRFhamkeUCPD090b9/f+zZs0ehrE9eQkICVqxYgbt37wIApFIpOnTooNBGKpViwYIFkEqlALLLJEeOHIk7d+7k2XdISAh+//13dO3aVVhQXmxGRkaYPn268PjEiRP4+eeflSZfSUhIwOzZs3H69Glh34wZMxRGrwDNvb6FVb9+fWH7ypUrGpk9NLe6desK5cNxcXGYO3eu8OVA06ZN8fHHHxd4jmnTpgllwOHh4fjiiy9w+vTpPGdNDQ8Px86dO9G7d2+cOnVKMz9IEVhbWyvce+nl5aX0xYH87+/SpUtVljZfu3YN7u7uSExMLNTvb3HeT317bYlIf3G2WCIiHTZw4ED89ddfePHiBdLS0vDjjz9i8+bNaNCgAcqWLYtHjx4JCUiDBg3QoUMHbNu2Te1+Q0JCsHDhQixatAg1a9ZEvXr1YGNjg4yMDLx//x5BQUEKI4tfffUVqlatqnSedu3aYcGCBViwYAEyMzNx69YtuLm54eOPP0bDhg1hZWWF1NRUREZG4sGDBzpbOuvq6oqbN29iz549ALJLGE+dOoXWrVvD1tYWUVFRuH79ukLC+eWXX+LTTz9VeT5Nvb6F0aRJE1SrVg1v375FREQEevfujfbt28PGxka4f69JkyZwdXUt1vlz9OvXDw8ePAAAnDlzRmF/YVSqVAkbN27E119/jQ8fPiAiIgLfffcdKlasiKZNm8LW1hZZWVmIiYnB48eP8fr1a7WW29GEMWPGwNPTE0lJSUhKSsLOnTvx/fffC8dHjx4NHx8fREdHIzY2FuPHj4ejoyPq1KkDiUSC+/fv4/HjxwCyR8grVqyo8gskecV5P/XxtSUi/cTkkohIh5UpUwabNm3CV199hdevXwPIXjvv6dOnCu2cnZ2xZs0aHDhwQO0+5WeklclkePnyZZ6zTJqYmGDixIl5LsUAAIMHD0bNmjUxf/58YdbYFy9e5DuDbL169VC+fPlixV9S5s2bB1tbW/z5559IS0tDYmIizp07p9SubNmy+PbbbzFhwgSV59H061sQIyMjLFiwAJMnT0ZaWhoiIiJw5MgRhTYDBgxQO7ns27cvVqxYoTAaZmxsXKTzNm3aFAcPHsScOXOEmVajoqLyvU/Q1tZWKL/
WtgoVKmDYsGHYvn07gOxR6XHjxsHS0hJA9ozJGzduxDfffCOMat67dw/37t1TOE+PHj2wdOlS/PrrrwX2Wdz3U99eWyLST0wuiYh0XK1atXDkyBHs2bMHZ86cwfPnz5Geng47OzvUr18fffv2Ra9evYRJO9Q1duxY9OzZE//++y+CgoLw8OFDvHnzBomJiZBIJLCyskLt2rXRpk0bfP7556hevXqB52zTpg3+/vtvnD17FhcuXMDt27cRGRmJhIQEmJqawtbWFrVr14aTkxM6deqkMEOrLpk0aRL69+8Pb29vXLlyBaGhoYiPj4elpSVq1KiBDh06wM3NTZg9VZWSeH0L0rlzZxw6dAienp4IDAzEmzdvkJSUpNHRqUqVKqFt27a4evWqsK99+/YKS9IURvXq1bFjxw4EBQXh9OnTuHHjBt69e4e4uDhIpVJYW1vjo48+QuPGjdGhQwe0atVKY9d+cYwbNw579+5FcnIy4uPjsWvXLnz77bfCcScnJ5w8eRI7d+7E+fPnhS+J7Ozs4OjoiH79+qFbt25F6rO476e+vbZEpH8kMtY9EBERERERkZo4oQ8RERERERGpjcklERERERERqY3JJREREREREamNySURERERERGpjcklERERERERqY3JJREREREREamNySURERERERGpjcklERERERERqY3JJREREREREamNySURERERERGpjcklERERERERqY3JJREREREREamNySURERERERGpjcklERERERERqY3JJREREREREamNySURERERERGp7f8BmJFuKXTa7dcAAAAASUVORK5CYII=", + "application/vnd.jupyter.widget-view+json": { + "model_id": "14f38354b0354bc187be9db34990fcce", + "version_major": 2, + "version_minor": 0 + }, "text/plain": [ - "
" + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" ] }, "metadata": {}, "output_type": "display_data" }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, { "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4e3d47f0ecdc489ca34de778ebfb3021", + "version_major": 2, + "version_minor": 0 + }, "text/plain": [ - "
" + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" ] }, "metadata": {}, "output_type": "display_data" - } - ], - "source": [ - "cc.plot_roc(\n", - " roc_metric_dict={\"Geneformer\": all_metrics[\"all_roc_metrics\"]},\n", - " model_style_dict={\"Geneformer\": {\"color\": \"red\", \"linestyle\": \"-\"}},\n", - " title=\"Dosage-sensitive vs -insensitive factors\",\n", - " output_directory=output_dir,\n", - " output_prefix=output_prefix,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "d10ac27f-8d70-400e-8a00-d0b84c1d02b4", - "metadata": {}, - "outputs": [ + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "5997f34a471f4a918fd32043fc519bb3", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "affe20b63e08414cb0863e1f6c1aad18", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "fca7f8cafa504738b7eaddd3f7b708fc", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "11f299f23b124674ab9e334bdbe09288", 
+ "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "01a88ef05cb64f24adecfb5674265a02", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "2f88e6525cbd486c9f03491a04681283", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "8bb884df7370471d986c51c10431ba10", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4b82e5fe600b4270bb6268e68f76d093", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "cd15c803ecc34a8d878df577ffd80252", + "version_major": 
2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "246cac7b5a0b4fd799e7e2081badbdbf", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "fbc93f4256724314a5141ac29062bae9", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b38551b3ac134fef8aa0c6ea3b7fa2a0", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "16ddc360a6b64906bd3f1d1adcc94efe", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "44b3af87a1794fc09d00dd3743c4705d", + "version_major": 2, + 
"version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYoAAAEWCAYAAAB42tAoAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAAuX0lEQVR4nO3df3zNdeP/8cfZZhhjaD8YCZElGiF98qsx1MwYGlep67q4CqVPF24hJUr50lWuui5CxIeiXZeWaEhURkJEY6j8mB9jx6+xOft99v7+sZrWfpxh55yd7Xm/3brdds55nfd57n3L+7n3+33er7fJMAwDERGRErg5O4CIiFRsKgoRESmVikJEREqlohARkVKpKEREpFQqChERKZWKQkRESuXh7AAiFVlISAgXL17E3d0dLy8vunXrxssvv0ytWrUA+OGHH/jnP//JgQMHcHNzo1OnTkycOJE777yzYBnXrl3jnXfe4csvv+Tq1avcdttt9OzZkzFjxlC/fn1n/WoiZaY9ChEbFixYwL59+1izZg2HDh1i0aJFAOzbt4+RI0fSq1cvtm3bxpYtW7jrrrsYPnw4p0+fBiA7O5snn3ySo0ePsnjxYvbu3cvHH3+Mj48PBw4ccOavJVJm2qMQKSNfX1+6du3K4cOHAXjzzTeJiIjgySefLBjz97//nYSEBP71r38xZ84cPvvsM86dO8fy5csL9kIaNGjAM88845TfQeRmaI9CpIySk5PZtm0bt99+OxkZGezbt49+/foVGffwww+zY8cOAHbs2EG3bt0KSkLEFWmPQsSG3/76T09Pp0uXLjz33HNcvXqVvLw8fH19i4z39fUlJSUFgCtXrtCmTRuH5hUpb9qjELFh3rx57Nu3jxUrVnD8+HFSUlKoU6cObm5uXLhwocj4CxcuUK9ePQB8fHyKHSPiSlQUImXUuXNnIiMjmT17Nl5eXgQHB7Nx48Yi4zZs2ECXLl0A+J//+R+2b99Oenq6o+OKlBsVhcgNePLJJ9mxYweHDx9mwoQJrFmzhuXLl3Pt2jWuXr3K3Llz2b9/P88++ywAERERBAQEMG7cOI4dO0ZeXh4pKSksWLCArVu3Ovm3ESkbFYXIDahfvz4RERHMnz+fjh07snjxYr788ku6devGQw89xOHDh1m5ciV33HEHAJ6enixbtozmzZvz17/+lfvuu4+hQ4eSkpJCu3btnPvLiJSRSTcuEhGR0miPQkRESqWiEBGRUqkoRESkVCoKEREplctdmX3//fcTGBjo7BgiIi4lKSmJXbt23dR7Xa4oAgMDiYmJcXYMERGXEhkZedPv1aEnEREplYpCRERKpaIQEZFSqShERKRUKgoRESmVikJEREplt6KYMmUKDzzwAP379y/2dcMwmDlzJqGhoYSHh5OQkGCvKCIicgvsdh1FZGQkjz/+OJMmTSr29bi4OBITE9m0aRM//vgj06dP57///a+94oiI2FTSZNolzbFd0tTbJS6nxPElLf/G8tiL3YqiU6dOnDlzpsTXt2zZwsCBAzGZTAQHB5Oamsr58+fx8/OzVyQRh8jLM8jMtZKWmUtWTh7Z1jxyrHlczcjBMCDn18fJqZl4uruRmZvH6cvpeFf3wGoY5OUZWA0Dax7kGQa5VoNTl9O5rbYn1l9fy8szyDMgLTOHi9eyqVuzGnmG8et/+RuqPIM/PDbIy4PESxb869S4Pj
4v/3XrH95rzTO4mpGDh5sJdzdTod+xxO1UOW3w7L0Broo63MJ7nXZlttlsJiAgoOBxQEAAZrO52KKIjo4mOjoaoOCm9SKOdOlaFklXMkhKyeCX89c4dzWDjGwryamZ5FrzN6hXM3I4n5ZVLp/n7mbC3WTCzQ083NwwDANLtpWGdWvgZsrfcLu7mTABaVm5ZORYqedVDZPJhJsJ3NzccDOZMJnA7bfnTCZMJhMN69bgakYOgfVq4v7rc7+97ub2u59/7YarGTn4161RJGP+pxdlKv7pEkaXNr58ll/SG5yVp8TxJb1wo8sp4ZO//PmGFl+I04qiuL8MSlpRUVFRREVFAbd2GbpIaTJzrCRdyeDAmascu3CNA0lXSUrJ4HxaFlczcop9T5P6NcnJNWjTqA5+dapTo5o7lqxcmvvWJs8w8K5RjTo1PPB0d8vfsJtM1K9VjWq/Pq7l6YFXdXequ7vjVd3913K4sQ2GSFl8+e7Nv9dpRREQEEBycnLB4+TkZB12Eoc4n5rJ+gPn+Pn8NU5dSic7N4+DZ6+Snm0tMvaOBl70axNAM99aNK3vRQ1Pd4IC6hBQzF/YIpWV04oiJCSEDz/8kLCwMH788Ue8vb1VFFKurqbn8M3P57l4LZtvj17kQloWOdY8jiSnFRoXUKcGoXf7U6dGNZr71qK5b23aBdalXi1PJyUXqVjsVhTjx49n9+7dpKSk0L17d8aNG0dubi4Aw4cPp0ePHmzdupXQ0FBq1qzJG2+8Ya8oUkWkWLLZefwSXx4yE3vgHFm5eUXGtPKvzeNdbqdfm4YE3+5D7eouN4GyiMPZ7V/J22+/XerrJpOJV155xV4fL1VEXp7B+9uOs+mQmb0n87/oYDLB/c3qc0eDWrS/3YduLX3x866Oh7uuLxW5GfpzSlxOWmYOGw4ks+3oRdb9eLbg+WGdmtC15W30aOWLd41qTkwoUrmoKMQlpGfn8kVCMv/+6ihJVzLIzMmjRjU3Hr4nAD/v6ozvcxd1a6ocROxBRSEVkjk1k53HL7H3ZAoJZ1MLDisBtPSrzSvhbXigRYMiF4KJSPlTUUiFkJljZe3+s+w8cYlj56/x45mrBa+1DvDmyQea0raxD/3uCdAJaBEH0784cZqsXCuf/3iOD749QcLZ1ILnvTzdGdyhMQ/fE0CnZvV1SEnEyVQU4lDXsnJZ9+NZdh6/xNdHzpOamVvw2lPdmzOmRwtdvyBSwagoxCEupGUxM/YQsfHnyM0zqOZuIqS1HxHBgfQK8qO6h7uzI4pICVQUYjcXr2Xxnz2n2Xgwmfhfzznc5e/N6J7NCWvbCE8PXdcg4gpUFFKurHkGa/YlEf39aXYnXgbgnsA6jOrajAHBjWjX2Me5AUXkhqkopFycu5rBu1uO8nn8WdJ+Pe8Q2T6Qv3VvTlDDOk5OJyK3QkUhN+18WiYffneSb36+UHBoqeudt/FopyY8fE8A1TRlhkiloKKQm/Lt0YuM/egHrmbkENzEh7E9WzAguBGtA7T3IFLZqCjkhr2z+Rfmbv6ZujWr8cmYB7ivaX1nRxIRO1JRyA1ZuPUYczf/zL1NfFg04j786+gGPiKVnYpCysQwDOZ/c4w3v/gJkwlWjrqfWppKQ6RK0L90senboxeZsS6Bn83XuLthHd4c2k4lIVKF6F+7lOqn5DQeW7wLgNcH3cOwTrdrxlaRKkZFISU6fTmdIQt24Onhxv+LbEtkh8bOjiQiTqCikGLlWPMYsWQXaZm5fPxUF7o0b+DsSCLiJLoiSor1ytoEEi+lM7xzE5WESBWnopAiTl9OZ+WuU/h5V2dWZDtnxxERJ1NRSBF/W74HgJkD73FyEhGpCFQUUsjibcc5kpxG5zvq06dNgLPjiEgFoKKQApsPmZkZe5jWAd4s+XNHZ8cRkQpCRSEAJF3JYNSvh5zeGdYe7xq6T7WI5FNRCIZh8FRBSQRzV4C3kxOJSEWiohAWxR0n4Wwqj3e5nYjgQGfHEZEKRkUhbD
iYDMAr4W2cnEREKiIVRRW34+hF9p++QkRwI92RTkSKpS1DFZaXZ/D6+sMAvNCvtZPTiEhFpaKowp77eB8JZ1Pp1vI2An1qOjuOiFRQKooqas2+JD6PP4enuxtL/9zJ2XFEpAKza1HExcXRt29fQkNDWbRoUZHX09LSGD16NAMGDCAsLIxPPvnEnnHkVxnZVp6P3g/A7qm98NC5CREphd22EFarlVdffZXFixcTGxvL559/ztGjRwuN+eijj2jRogVr165lxYoVzJ49m+zsbHtFkl+9v+04ALMHt8XHy9PJaUSkorNbUcTHx9O0aVOaNGmCp6cnYWFhbNmypdAYk8mExWLBMAwsFgt169bFw0O3yLCnK+nZvP3lz9Ss5s7Q+5o4O46IuAC7bZXNZjMBAdcnlfP39yc+Pr7QmMcee4wxY8bQrVs3LBYLc+fOxc2taHdFR0cTHR0NQEpKir0iVwn/3PwLABP6tMJNtzQVkTKwW1EYhlHkOZOp8IZp+/btBAUFsXz5ck6dOsVf/vIXOnbsSO3atQuNi4qKIioqCoDIyEh7Ra70MnOsLNuRSP1anozq1tzZcUTERdjt0FNAQADJyckFj81mM35+foXGxMTE0KdPH0wmE02bNqVx48YcP37cXpGqvP+34QgAI7o0dXISEXEldiuKtm3bkpiYyOnTp8nOziY2NpaQkJBCYxo2bMh3330HwMWLFzlx4gSNGze2V6QqLTUzh//7LpHmvrV4vndLZ8cRERdit0NPHh4eTJs2jVGjRmG1Whk8eDAtW7Zk1apVAAwfPpyxY8cyZcoUwsPDMQyDiRMnUr9+fXtFqtJejDmAYeTP5/THQ4AiIqWx61eMevToQY8ePQo9N3z48IKf/f39+eCDD+wZQYBTl9L5PP4cdzTwokcrX2fHEREXoyutqoCpaw4AMCNC98AWkRunoqjkVu89w7ZfLtKmUR3tTYjITVFRVHJvfpH/TafZg9s5OYmIuCoVRSW2+ZAZc2oWD97ZgHsC6zo7joi4KBVFJXU+NZNRv94He9Yg7U2IyM1TUVRChmHwp8W7AJgzpB23N/ByciIRcWUqikro9djDHD1/jQ63+zD0Pl3AKCK3RkVRySzYeozF208Q1LAOq0f/jy6uE5FbVuaiSE9Pt2cOKQcbDpzj/204QjV3E//3106aHVZEyoXNovjhhx945JFHeOSRRwA4cuQI06dPt3cuuUEJZ68y5qMf8K7uwebxPfDzruHsSCJSSdgsilmzZrFkyRJ8fHwAaN26NXv27LF3LrkBaZk5zFh7CIB/DgumaYNaTk4kIpVJmeZ6atiwYaHHxd1cSJzDkpXLiCW72X/6Cs/3bkmvIH9nRxKRSsZmUTRs2JAffvgBk8lEdnY2K1asoEWLFo7IJjYYhsHoD/ey//QVngu5k+d7t3J2JBGphGzuGkyfPp2PPvoIs9lMjx49OHz4MK+88oojskkpzqdm0mduHNt+uUj/dg0Z3+cuZ0cSkUrK5h7FiRMneOuttwo9t3fvXu677z67hZLSWfPy9yR+OX+N50LuZFwv3YhIROzH5h7FzJkzy/ScOIYlK5c/L93ND6euENkhkPF97qKau84ZiYj9lLhHsW/fPvbt28fly5dZunRpwfPXrl3DarU6JJwUdvKShcj5O7hkyWZEl6a8NlD3lxAR+yuxKHJyckhPT8dqtWKxWAqer127Nu+++65Dwsl1Jy9ZiFq4k0uWbF4beA8jujR1diQRqSJKLIrOnTvTuXNnBg0aRGBgoCMzye8YhsHy707yytoEAF6LaKOSEBGHsnkyu2bNmsyePZujR4+SlZVV8Pzy5cvtGkwgL89gckw8/9lzhrv8vZkbFczdjeo4O5aIVDE2z4JOnDiR5s2bc+bMGZ599lkCAwNp27atI7JVaYkXLTzy7jb+s+cMoXf7s25cV5WEiDiFzaK4cuUKQ4cOxcPDg86dOzNr1ix+/PFHR2SrshIvWoh8bwfHL1qY1v9uFo
24D08PfbNJRJzD5qEnD4/8IX5+fnzzzTf4+fmRnJxs92BV1Ue7TjL104MALPtLJ3re5efkRCJS1dksijFjxpCWlsakSZN47bXXsFgsvPjii47IVuV8uu8MUz89SAvfWrwzrL3ucy0iFYLNonjooYcA8Pb2ZsWKFUD+ldlSfgzD4IkPdrPtl4sArHqqi6YJF5EKo8SisFqtbNiwAbPZTLdu3WjVqhVff/01CxcuJDMzkzVr1jgwZuWVnp1Lu+mbyM0zuLdxXd4Z1l4lISIVSolFMXXqVM6dO0e7du2YOXMmgYGB7Nu3j4kTJ9K7d29HZqy0Dp9LZfSHe8nNM+jfriHvDmuvu9KJSIVTYlEcPHiQtWvX4ubmRlZWFl26dGHTpk34+vo6Ml+ldTUjh4ff2UaNam7Mf6wDj7RtaPtNIiJOUOJ3LqtVq1Zwg6Lq1atzxx13qCTK0V+XfQ/AnCH3qiREpEIrcY/i+PHjhIeHFzw+depUocfr1q2zb7JKbMXOk+w9mYKPVzUG3NvI2XFEREpVYlGsX7/ekTmqjKQrGby85iA1qrmxZXwPZ8cREbGpxKLQRIDl7+j5NELnxgHw4cj7aVC7upMTiYjYZtd5IeLi4ujbty+hoaEsWrSo2DG7du0iIiKCsLAwHn/8cXvGcapDZ1MZOG8HhgHvDm9PxzvqOzuSiEiZ2Lzg7mZZrVZeffVVli5dir+/P0OGDCEkJIQ777yzYExqaiozZsxg8eLFNGrUiEuXLtkrjlP9NsGfyQRvDb1X5yVExKWUaY8iMzOT48eP39CC4+Pjadq0KU2aNMHT05OwsDC2bNlSaMy6desIDQ2lUaP8DWeDBg1u6DNcwaaEZPq9k3+4acHj9zH4vsZOTiQicmNsFsVXX31FREQEo0aNAuDw4cOMHj3a5oLNZjMBAQEFj/39/TGbzYXGJCYmkpqayogRI4iMjKx0V3vP+/ooT63Yi09NT778e3f6tgmw/SYRkQrG5qGnf//736xevZoRI0YAEBQURFJSks0FG4ZR5DmTqfBVx1arlYSEBJYtW0ZmZibDhg3j3nvvpVmzZoXGRUdHEx0dDUBKSorNz3Y2a57BI+9s4ydzGm0D67L8r52pV8vT2bFERG6KzaJwd3fH29v7hhccEBBQaDpys9mMn59fkTH16tXDy8sLLy8vOnbsyJEjR4oURVRUFFFRUQBERkbecBZHi3xvBz+Z0+jcrD4f/62LpuUQEZdm89BTy5YtWbduHVarlcTERF577TXat29vc8Ft27YlMTGR06dPk52dTWxsLCEhIYXG9OrViz179pCbm0tGRgbx8fG0aNHi5n+bCuCz/Un8ePoK1T3ciH5KJSEirs/mHsXLL7/MggUL8PT0ZMKECXTt2pWxY8faXrCHB9OmTWPUqFFYrVYGDx5My5YtWbVqFQDDhw+nRYsWdOvWjQEDBuDm5saQIUNo1arVrf9WTnLpWhb/+/F+AL6e2LPIoTYREVdkMoo7mfA7hw4d4u6773ZUHpsiIyOJiYlxdoxiDV2wg+8TU1g04j766MS1iFQgt7LttLlHMWvWLC5cuEC/fv0ICwujZcuWN/VBld3ibcf5PjGFe5v4qCREpFKxWRQrVqzgwoULbNiwgZdffhmLxcLDDz9cpsNPVcX+01eYGXuY2+t78fHfujg7johIuSrTBXe+vr488cQTzJgxg9atWzN//nx753IZKZZsBs77FoB5f+pATU93JycSESlfNvcojh07xvr16/niiy/w8fHhkUceYfLkyY7I5hKeWrEHgGceakHbxnWdnEZEpPzZLIopU6YQFhbGkiVL8Pf3d0Qml3HykoXvE/MvAJzY5y4npxERsQ+bRfGf//zHETlczvEL14j4d/4hp8VPdNRXYUWk0iqxKP73f/+Xd955p9Bd7X6vKt/h7mpGDiOW7CYtK5f/+2tnerTSLWJFpPIqsSimTp0KwIIFCxwWxhWkZ+fy9Io9JF3J4LWINioJEan0Sv
zW02/zMq1cuZLAwMBC/61cudJhASuSA2eu0nHmZnYev8zTPZoz4oE7nB1JRMTubH49dseOHUWei4uLs0uYiuxCWhZDFuwgPdvKkic7MuXhIGdHEhFxiBIPPa1cuZJVq1Zx+vTpQucpLBYLHTp0cEi4iiIvz+DPS3eTlZvHv4a3p1eQvv0lIlVHiUURHh5O9+7defvtt5kwYULB87Vq1cLHx8cR2SqMCf/9kYSzqYwPbUW4bmMqIlVMiUVhMplo3Lgx06ZNK/LalStXqkxZxP18gU/3JfHQXb6MC7nT9htERCqZEotiwoQJLFy4kMjISEwmU6E71plMpiL3v66sXvv8EABvDr1X10qISJVUYlEsXLgQyL9ndlX1yd4z/HL+GoM7NOa22tWdHUdExClsfutp7969pKenA/DZZ58xa9Yszp49a/dgFcGbX/wEwOSHWzs5iYiI89gsiunTp1OzZk2OHDnC4sWLadSoES+88IIjsjnV1z+dJzk1ky7N6+Prrb0JEam6bBaFh4cHJpOJzZs388QTT/Dkk09isVgckc2p3vvmGAAvhVWcu/uJiDiDzaKoVasWCxcuZO3atfTs2ROr1Upubq4jsjnNJ3vPsPvEZYZ3bsI9gZo6XESqNptFMXfuXDw9PXnjjTfw9fXFbDYzcuRIR2Rziv/bkciE//4IwKhuzZ2cRkTE+WwWha+vL+Hh4aSlpfH1119TvXp1Bg4c6IBojmdOzeSVtQkA7JgcQgvf2k5OJCLifDaLYv369QwdOpSNGzeyYcOGgp8rm8wcK396fyeQf0vTRj41nZxIRKRisHnjogULFrB69WoaNGgAwOXLl/nzn/9Mv3797B7OkZZsP8GxCxYGtQ8krF1DZ8cREakwbO5RGIZRUBIAPj4+ha7Srgzifr7Am1/8xJ1+tXn70XudHUdEpEKxuUfRtWtXRo4cSVhYGJB/KKp79+52D+ZIL6yOB2D+Yx00TYeIyB/YLIpJkyaxadMm9u7di2EYREVFERoa6ohsDvGzOY3k1ExCWvvRyt/b2XFERCqcEosiMTGR2bNnc/r0aVq1asWkSZPw969892F4e9PPADyrmWFFRIpV4jmKF198kYceeoh3332XNm3a8Nprrzkyl0Mcv3CNjQnJhLT2o8Pt9ZwdR0SkQipxj8JisfDoo48C0Lx5cwYNGuSwUI7yl2XfA/Byf03TISJSkhKLIisri0OHDhV8wykzM7PQ4zZt2jgmoZ0cu3CNk5fSqV3dg2a31XJ2HBGRCqvEovD19WXWrFkFj2+77baCxyaTieXLl9s/nR3NWn8YgFV/6+LkJCIiFVuJRbFixQpH5nCojGwrmw+fB6BtY036JyJSGpsX3FVGE1fnT/r3996tnJxERKTis2tRxMXF0bdvX0JDQ1m0aFGJ4+Lj4wkKCnLIHFIplmxi488B8MxDLez+eSIirs5uRWG1Wnn11VdZvHgxsbGxfP755xw9erTYcf/4xz/o2rWrvaIU8tjiXQD8a3h7PNyr5A6ViMgNKdNcT5999hn//ve/ATh79izx8fE2FxwfH0/Tpk1p0qQJnp6ehIWFsWXLliLjVqxYQd++fQvNJ2Uvy749waFzqfRrE0D4vY3s/nkiIpVBme6ZvX//fmJjY4H8O97NmDHD5oLNZjMBAQEFj/39/TGbzUXGbN68mWHDhpW6rOjoaCIjI4mMjCQlJcXmZxfHMAymrzuEl6c77wwPvqlliIhURTaLIj4+nldeeYXq1asDULduXXJycmwuuLgZZv844d7rr7/OxIkTcXd3L3VZUVFRxMTEEBMTQ716N3cF9c7jlwEYcG8jqnuU/nkiInKdzUkBPTw8sFqtBRv5y5cv4+Zm+9h+QEAAycnJBY/NZjN+fn6Fxhw8eJDx48cDkJKSwtatW/Hw8KB379439EvYYsnKZdIn+YfLxvbUnE4iIjfCZlGMGDGCZ555hkuXLjF37lw2btzI888/b3PBbdu2JTExkdOnT+Pv70
9sbCxvvfVWoTFfffVVwc+TJ0+mZ8+e5V4SAM+s/IFTl9N57P7bub2BV7kvX0SkMrNZFAMGDKBNmzbs3LkTwzCYP38+LVrY/lqph4cH06ZNY9SoUVitVgYPHkzLli1ZtWoVAMOHD7/19GWwJ/Ey3/x0gdYB3rw+qK1DPlNEpDIxGTZuV3f27Nlin2/UyDnfGoqMjCQmJqbM45/4YDdxP1/gqwk9aO5b247JREQqrhvddv6ezT2Kp59+uuDnrKwszpw5Q7NmzQq+BVWRHT6XStzPF+jW8jaVhIjITbJZFOvWrSv0OCEhgejoaLsFKk/Lvk0E4K9dmzk3iIiIC7vhS5PbtGnDgQMH7JGlXKVn5/Lp/iS8q3vw0F1+tt8gIiLFsrlHsXTp0oKf8/LyOHToEPXr17drqPKw/LuTZOfm8XJYkLOjiIi4NJtFYbFYCn52d3enR48e9O3b166hblVensHsjUcAiGgf6OQ0IiKurdSisFqtWCwWJk2a5Kg85eL19YcxDHgu5E7q1Kjm7DgiIi6txHMUubm5uLu7c+jQIUfmuWWGYbBk+wkAxvVq6eQ0IiKur8Q9iqFDh/Lpp58SFBTE6NGj6devH15e169q7tOnj0MC3qi4Xy4CENk+kGqaRlxE5JbZPEdx9epV6tWrx65duwo9X1GLYlNC/vxS4/vo7nUiIuWhxKK4dOkSS5cupWXLlphMpkKzwf5xFtiK5KNdp2jlX5vG9TSnk4hIeSixKPLy8gp948kVfPHr3sR9TSv+13dFRFxFiUXh6+vLs88+68gst2zzofwbIz3fWyexRUTKS4lne23MFVjh5Fjz+O/eM/jXqY5/nRrOjiMiUmmUWBTLli1zYIxb9/H3pwF4urvtKdBFRKTsSiwKHx8fB8a4ddt+vgDAEw80dXISEZHKpVJcaHA+LZNNh8wMuLcRHrp2QkSkXFWKreoH2xMBGKR5nUREyl2lKIoFW48B0KOVr5OTiIhUPi5fFAeTrgIQ1q4hbm4V90JAERFX5fJFMffLnwEY00PfdhIRsQeXLopcax47j18i0Kcm9wTWdXYcEZFKyaWL4qsj57FkW3mu153OjiIiUmm5dFH8dpHdI20bOjmJiEjl5dJF8e3Ri9xe3wtv3cVORMRuXLYosnKtZOXm0fGOes6OIiJSqblsURy/kD8Feks/bycnERGp3Fy2KL46ch6AB+9s4OQkIiKVm8sWxfZfLhLoU5N2jX2cHUVEpFJz2aLYc/IyLf1rOzuGiEil55JFcdmSTY7VwN9bNygSEbE3lyyKj78/BUDnZro3toiIvblkUVzLzAUgsoOmFRcRsTe7FkVcXBx9+/YlNDSURYsWFXl97dq1hIeHEx4ezrBhwzhy5EiZlvvtsUt4ebpjMmm2WBERe7NbUVitVl599VUWL15MbGwsn3/+OUePHi00pnHjxnz44YesW7eOMWPG8PLLL5dt4YZB3Zq6GltExBHsVhTx8fE0bdqUJk2a4OnpSVhYGFu2bCk0pkOHDtStmz/ra3BwMMnJyWVa9qnL6bRrrNliRUQcwcNeCzabzQQEBBQ89vf3Jz4+vsTxq1evpnv37sW+Fh0dTXR0NAApKSmkpOfQtEGt8g0sIiLFsltRGIZR5LmSzins3LmT1atXs3LlymJfj4qKIioqCoDwiIEAVPdwyfPwIiIux25FERAQUOhQktlsxs/Pr8i4I0eO8NJLL/H+++9Tr57tCf6uZVkB6K77Y4uIOITd/ixv27YtiYmJnD59muzsbGJjYwkJCSk05uzZs4wbN445c+bQrFmzMi03Kze/KIIa1in3zCIiUpTd9ig8PDyYNm0ao0aNwmq1MnjwYFq2bMmqVasAGD58OPPmzePKlSvMmDEDAHd3d2JiYkpf8K9HtGpXt1t0ERH5HZNR3MmECqx9977kPjSeAzP6OjuKiIjLiIyMtP2HeAlc7oxwbp5BNZ3IFhFxGJfb4hqGQT0vXWwnIuIoLlcUWbl5BDfR7U
9FRBzF5YoCIM+1TquIiLg0lyyKoIa6T7aIiKO4ZFF4uLlkbBERl+SSW9w7bvNydgQRkSrDJYuiZjVdbCci4iguWRSNfHSvbBERR3HJotBNi0REHMcli6Kau0vGFhFxSS65xdWtskVEHMc1iwI1hYiIo7hmUagnREQcxiWLQkREHMcli0J7FCIijuOaRaFzFCIiDuOaRaGeEBFxGNcsCmcHEBGpQlyzKLRLISLiMK5ZFM4OICJShbhmUagpREQcxkWLQk0hIuIoLlkUIiLiOCoKEREplYpCRERK5XJFobMTIiKO5XJFISIijuV6RaFdChERh3K9olBTiIg4lAsWhYiIOJLLFYX2J0REHMvlikJERBzLrkURFxdH3759CQ0NZdGiRUVeNwyDmTNnEhoaSnh4OAkJCfaMIyIiN8FuRWG1Wnn11VdZvHgxsbGxfP755xw9erTQmLi4OBITE9m0aROvvfYa06dPt1ccERG5SXYrivj4eJo2bUqTJk3w9PQkLCyMLVu2FBqzZcsWBg4ciMlkIjg4mNTUVM6fP1/qcnWOQkTEsTzstWCz2UxAQEDBY39/f+Lj40sdExAQgNlsxs/Pr9C46OhooqOj8wNfSyYyMtJesV1KSkoK9erVc3aMCkHr4jqti+u0Lq47ceLETb/XbkVhGEaR5/44PXhZxgBERUURFRUFQGRkJDExMeWU0rVpXVyndXGd1sV1WhfX3cof2HY79BQQEEBycnLB4+L2FP44Jjk5ucgYERFxLrsVRdu2bUlMTOT06dNkZ2cTGxtLSEhIoTEhISGsWbMGwzDYv38/3t7eKgoRkQrGboeePDw8mDZtGqNGjcJqtTJ48GBatmzJqlWrABg+fDg9evRg69athIaGUrNmTd544w2by/3tEJRoXfye1sV1WhfXaV1cdyvrwmQUd6JARETkV7oyW0RESqWiEBGRUlXYotD0H9fZWhdr164lPDyc8PBwhg0bxpEjR5yQ0jFsrYvfxMfHExQUxMaNGx2YzrHKsi527dpFREQEYWFhPP744w5O6Di21kVaWhqjR49mwIABhIWF8cknnzghpf1NmTKFBx54gP79+xf7+k1vN40KKDc31+jVq5dx6tQpIysrywgPDzd++eWXQmO++eYbY+TIkUZeXp6xb98+Y8iQIU5Ka19lWRd79+41rly5YhhG/nqpyuvit3EjRowwRo0aZWzYsMEJSe2vLOvi6tWrxsMPP2wkJSUZhmEYFy9edEZUuyvLunjvvfeMOXPmGIZhGJcuXTI6depkZGVlOSOuXe3evds4ePCgERYWVuzrN7vdrJB7FPaa/sMVlWVddOjQgbp16wIQHBxc6NqUyqQs6wJgxYoV9O3blwYNGjghpWOUZV2sW7eO0NBQGjVqBFBp10dZ1oXJZMJisWAYBhaLhbp16+LhYbcvfTpNp06dCrYFxbnZ7WaFLIripv8wm82ljvlt+o/Kpizr4vdWr15N9+7dHRHN4cr6/8XmzZsZNmyYo+M5VFnWRWJiIqmpqYwYMYLIyEjWrFnj4JSOUZZ18dhjj3Hs2DG6devGgAEDmDp1Km5uFXLzZ1c3u92skJVqlOP0H67uRn7PnTt3snr1alauXGnvWE5RlnXx+uuvM3HiRNzd3R0VyynKsi6sVisJCQksW7aMzMxMhg0bxr333kuzZs0cFdMhyrIutm/fTlBQEMuXL+fUqVP85S9/oWPHjtSuXdtRMSuEm91uVsii0PQf15VlXQAcOXKEl156iffff7/SToJWlnVx8OBBxo8fD+RPCLd161Y8PDzo3bu3Q7PaW1n/jdSrVw8vLy+8vLzo2LEjR44cqXRFUZZ1ERMTw1NPPYXJZKJp06Y0btyY48eP065dO0fHdaqb3W5WyH0vTf9xXVnWxdmzZxk3bhxz5sypdBuB3yvLuvjqq68K/uvbty+vvPJKpSsJKNu66NWrF3v27CE3N5eMjAzi4+Np0aKFkxLbT1nWRcOGDfnuu+8AuH
jxIidOnKBx48bOiOtUN7vdrJB7FPaa/sMVlWVdzJs3jytXrjBjxgwA3N3dK+WMmWVZF1VFWdZFixYtCo7Ju7m5MWTIEFq1auXk5OWvLOti7NixTJkyhfDwcAzDYOLEidSvX9/Jycvf+PHj2b17NykpKXTv3p1x48aRm5sL3Np2U1N4iIhIqSrkoScREak4VBQiIlIqFYWIiJRKRSEiIqVSUYiISKlUFFIhBQUFERERUfDfmTNnShzbvn37W/68yZMnExISQkREBIMGDWLfvn03vIypU6dy9OhRABYsWFDotfKaUuS39dK/f39Gjx5NampqqeMPHz7M1q1by+WzperS12OlQmrfvn2ZN9Y3MrYkkydPpmfPnvTr14/t27cze/Zs1q1bd9PLK49MtpY7adIk7rjjDsaMGVPi+JiYGA4ePMi0adPKPYtUHdqjEJdgsVh48sknGTRoEOHh4WzevLnImPPnz/PYY48V/MW9Z88eIH+en6ioKAYNGsRzzz2HxWIp9bM6derEqVOnAFi6dCn9+/enf//+LFu2DID09HSeeuopBgwYQP/+/Vm/fj0AI0aM4MCBA/zjH/8gMzOTiIgIJkyYAFzf63n++ecL/YU/efJkvvjiC6xWK7Nnz2bw4MGEh4fz8ccf21wnwcHBBRO6xcfHM2zYMAYOHMiwYcM4fvw42dnZvPvuu6xfv56IiAjWr19Peno6U6ZMYfDgwQwcOLDY9ShSxC1Nfi5iJ61btzYGDBhgDBgwwBg7dqyRk5NjpKWlGYaRfz+B3r17G3l5eYZhGEZwcLBhGIaxZMkSY/78+YZh5N+jIC0tzbh06ZLxpz/9ybBYLIZhGMbChQuNf/3rX0U+b9KkSQX3rli/fr0xZMgQ48CBA0b//v0Ni8ViXLt2zXjkkUeMhIQEY+PGjcbUqVML3puammoYhmE8/vjjRnx8fKFMv/nt8aZNm4wXXnjBMAzDyMrKMrp3725kZGQYH3/8sTFv3ryC5wcNGmScOnWqSM7flpObm2uMGzfO2Lp1q2EYhpGWlmbk5OQYhmEY3377rfHss88ahmEYn3zyiTFjxoyC97/11lvGmjVrDMPIv19Fnz59CtaNSEkq5BQeIjVq1OCzzz4reJyTk8Pbb7/N999/j5ubG2azmYsXL+Lr61swpm3btrz44ovk5ubSu3dvgoKC+Prrrzl69GjB9B45OTkEBwcX+5lz5szhvffeo379+rz++ut899139O7dGy8vLwBCQ0PZs2cP3bp1Y/bs2bz55ps89NBDdOzYscy/V/fu3Zk5cybZ2dnExcXRsWNHatSowbfffstPP/3EF198AeTfke3kyZM0adKk0Pt/21NJSkqiTZs2PPjggwXjJ02axMmTJzGZTOTk5BT7+du3b+err77igw8+ACArK4tz585VyjmgpPyoKMQlrFu3jsuXLxMTE0O1atUICQkhKyur0JhOnTrx4YcfsnXrVl544QVGjhxJnTp1ePDBB3n77bdtfsYLL7xAv379Ch7v2LGj2HHNmjUjJiaGrVu38tZbb/Hggw/y7LPPlun3qF69Op07d2bbtm1s2LCBsLAwIH/655deeolu3bqV+v7fCjQtLY2nn36ajz76iCeeeIJ33nmH+++/n3nz5nHmzBmeeOKJEpfx7rvv0rx58zLlFQGdoxAXkZaWRoMGDahWrRo7d+4kKSmpyJikpCQaNGjAo48+yuDBg0lISCA4OJgffviBkydPApCRkcGJEyfK9JmdOnVi8+bNZGRkkJ6ezubNm+nYsSNms5maNWsSERHByJEjOXToUJH3enh4lPhXfVhYGDExMezZs4euXbsC0LVrV1atWlXwnhMnTpCenl5iNm9vb1566SU++OADcnJySEtLw9/fH4BPP/20YFytWrUKnZPp2rUrH374YcF9CYrLLvJH2qMQlxAeHs6YMWOIjIwkKCio2L+Id+/ezZIlS/Dw8MDLy4vZs2dTv359Zs2axfjx48nOzgbyTy
iXZTr2Nm3aEBkZydChQwEYMmQId999N9u2bWPOnDm4ubnh4eHB9OnTi7z30UcfZcCAAdx999289dZbhV578MEHmTRpEiEhIXh6egIwdOhQkpKSiIyMxDAM6tWrx/z580vNd/fdd9O6dWtiY2MZNWoUkydPZunSpXTp0qVgzP3338+iRYuIiIjg6aefZuzYsbzxxhsMGDAAwzAIDAxk4cKFNteFVG36eqyIiJRKh55ERKRUKgoRESmVikJEREqlohARkVKpKEREpFQqChERKZWKQkRESvX/Aa/+QEs9g50JAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "****** Crossval split: 4/4 ******\n", + "\n", + "Filtering training data\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "be5426abaf5b41ebb51e2567dd73b0a4", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=50.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Filtered 35%; 32428 remain\n", + "\n", + "Filtering evalation data\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "ff5aad423e4f4bbab54518bc5f0fd028", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=50.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Filtered 53%; 23660 remain\n", + "\n", + "Labeling training data\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "78c25d0976854653be92baf65ca71158", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=10000.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Labeling evaluation data\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "c445de0805e145249f4647e5552292a2", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=5000.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Labeling evaluation OOS 
data\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "c553f188f56e47acafa77fab9cb2b21f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=5000.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Some weights of the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ were not used when initializing BertForTokenClassification: ['cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.decoder.bias', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.weight']\n", + "- This IS expected if you are initializing BertForTokenClassification from the checkpoint of a model trained on another task or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a BertForPreTraining model).\n", + "- This IS NOT expected if you are initializing BertForTokenClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n", + "Some weights of BertForTokenClassification were not initialized from the model checkpoint at /n/home01/ctheodoris/models/210602_111318_geneformer_27M_L6_emb256_SL2048_E3_B12_LR0.001_LSlinear_WU10000_Oadamw_DS12/models/ and are newly initialized: ['classifier.weight', 'classifier.bias']\n", + "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n", + ":45: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", + " batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [834/834 01:35, Epoch 1/1]\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
StepTraining Loss
1000.663500
2000.601800
3000.486200
4000.340400
5000.242700
6000.202300
7000.153600
8000.124400

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "0e1c475ab2ff4bfa8c65a24d587c8ad0", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "2ee8ff99342d4741a3f4ec4176b5d746", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "78a1a6af9439481ebe87731bb2d37c95", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "411ed284d33740eca1f0cef18df500a4", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "aafdf3014691426c9c6acca3834c45f2", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { 
+ "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "5aa3add5de134f589eaab69087b66549", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "7d255e53e1c2408697da1fa08860c9c0", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "29b8945f64354ae1b840a1dc316dedbf", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "de251d1fba3d4a67893047ee8275d606", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "8928cf69ea8746b2bef14028c0c0274a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", 
+ "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "0c0c4e21626f4ab99ce0696ee9322e0c", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "9e3499a2376d43bab0086cba34d1b522", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "f33d4f879c294c6a8a6455b3692488d5", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "38dd78e3ebf44c2bad58f9576a525ab3", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b052e8b179584043945b49de9af31676", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": 
"stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "e3e11781b4394db1a01454ef37a490f2", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, { "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "915efb0adfb44c5caa01cf213c3cd56b", + "version_major": 2, + "version_minor": 0 + }, "text/plain": [ - "{'conf_matrix': Dosage-sensitive TFs Dosage-insensitive TFs\n", - " Dosage-sensitive TFs 61229.0 14801.0\n", - " Dosage-insensitive TFs 9094.0 73907.0,\n", - " 'macro_f1': [0.8489695337205987,\n", - " 0.8637730998133415,\n", - " 0.9122635701525341,\n", - " 0.8180200155972593,\n", - " 0.7913574275548942],\n", - " 'acc': [0.8544562281799618,\n", - " 0.8647275498539312,\n", - " 0.9122812348079727,\n", - " 0.8182044035899506,\n", - " 0.798060129740519],\n", - " 'all_roc_metrics': {'mean_tpr': array([0. 
, 0.29330305, 0.39824459, 0.48477052, 0.53910681,\n", - " 0.58654819, 0.62233428, 0.65499297, 0.68383714, 0.7105218 ,\n", - " 0.7331015 , 0.75404762, 0.77191402, 0.79007262, 0.80530801,\n", - " 0.81812243, 0.83182971, 0.84348565, 0.85308334, 0.86179954,\n", - " 0.87018186, 0.87841599, 0.88666193, 0.89398957, 0.90104605,\n", - " 0.90768847, 0.91468381, 0.92081589, 0.92687436, 0.93170239,\n", - " 0.93600138, 0.93963402, 0.9430781 , 0.94641134, 0.94881205,\n", - " 0.95143243, 0.95361201, 0.95556462, 0.95766077, 0.95966244,\n", - " 0.96118109, 0.96277551, 0.96448544, 0.96590662, 0.96726595,\n", - " 0.96852001, 0.96991619, 0.97113487, 0.9723888 , 0.97361378,\n", - " 0.97487929, 0.97591807, 0.97725326, 0.97856005, 0.97952476,\n", - " 0.98071045, 0.98164245, 0.98264028, 0.98393822, 0.9850845 ,\n", - " 0.98620898, 0.9872157 , 0.98857151, 0.98954745, 0.99058733,\n", - " 0.99138259, 0.99226871, 0.99306583, 0.99380789, 0.99461065,\n", - " 0.99527049, 0.99592002, 0.99655526, 0.99691174, 0.99757778,\n", - " 0.9978895 , 0.99816814, 0.99852539, 0.99874352, 0.99896924,\n", - " 0.99925024, 0.9993954 , 0.99949426, 0.99964604, 0.99974177,\n", - " 0.99977018, 0.9998233 , 0.99984802, 0.99990114, 0.99994688,\n", - " 0.99996108, 0.99997159, 1. , 1. , 1. ,\n", - " 1. , 1. , 1. , 1. , 1. ]),\n", - " 'mean_fpr': array([0. 
, 0.01010101, 0.02020202, 0.03030303, 0.04040404,\n", - " 0.05050505, 0.06060606, 0.07070707, 0.08080808, 0.09090909,\n", - " 0.1010101 , 0.11111111, 0.12121212, 0.13131313, 0.14141414,\n", - " 0.15151515, 0.16161616, 0.17171717, 0.18181818, 0.19191919,\n", - " 0.2020202 , 0.21212121, 0.22222222, 0.23232323, 0.24242424,\n", - " 0.25252525, 0.26262626, 0.27272727, 0.28282828, 0.29292929,\n", - " 0.3030303 , 0.31313131, 0.32323232, 0.33333333, 0.34343434,\n", - " 0.35353535, 0.36363636, 0.37373737, 0.38383838, 0.39393939,\n", - " 0.4040404 , 0.41414141, 0.42424242, 0.43434343, 0.44444444,\n", - " 0.45454545, 0.46464646, 0.47474747, 0.48484848, 0.49494949,\n", - " 0.50505051, 0.51515152, 0.52525253, 0.53535354, 0.54545455,\n", - " 0.55555556, 0.56565657, 0.57575758, 0.58585859, 0.5959596 ,\n", - " 0.60606061, 0.61616162, 0.62626263, 0.63636364, 0.64646465,\n", - " 0.65656566, 0.66666667, 0.67676768, 0.68686869, 0.6969697 ,\n", - " 0.70707071, 0.71717172, 0.72727273, 0.73737374, 0.74747475,\n", - " 0.75757576, 0.76767677, 0.77777778, 0.78787879, 0.7979798 ,\n", - " 0.80808081, 0.81818182, 0.82828283, 0.83838384, 0.84848485,\n", - " 0.85858586, 0.86868687, 0.87878788, 0.88888889, 0.8989899 ,\n", - " 0.90909091, 0.91919192, 0.92929293, 0.93939394, 0.94949495,\n", - " 0.95959596, 0.96969697, 0.97979798, 0.98989899, 1. 
]),\n", - " 'all_roc_auc': [0.9373324264902606,\n", - " 0.9410936383111078,\n", - " 0.9635257667493496,\n", - " 0.8903987740960708,\n", - " 0.8781592994811886],\n", - " 'roc_auc': 0.9141830130444975,\n", - " 'roc_auc_sd': 0.03204329033266111}}" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "all_metrics" - ] - }, - { - "cell_type": "markdown", - "id": "7007e45e-16c2-47a3-962c-92b9fe867bde", - "metadata": {}, - "source": [ - "### Train gene classifier with all data:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "6df82c21-937c-4563-ba6b-a52ce287f542", - "metadata": {}, - "outputs": [], - "source": [ - "import datetime\n", - "import pickle\n", - "from geneformer import Classifier\n", - "\n", - "current_date = datetime.datetime.now()\n", - "datestamp = f\"{str(current_date.year)[-2:]}{current_date.month:02d}{current_date.day:02d}{current_date.hour:02d}{current_date.minute:02d}{current_date.second:02d}\"\n", - "datestamp_min = f\"{str(current_date.year)[-2:]}{current_date.month:02d}{current_date.day:02d}\"\n", - "\n", - "\n", - "output_prefix = \"tf_dosage_sens_alldata\"\n", - "output_dir = f\"/path/to/output_dir/{datestamp}\"\n", - "!mkdir $output_dir" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "f031131c-54fd-4ad1-a925-bf0846cc3235", - "metadata": {}, - "outputs": [], - "source": [ - "# Example input_data_file: https://huggingface.co/datasets/ctheodoris/Genecorpus-30M/blob/main/example_input_files/gene_classification/dosage_sensitive_tfs/dosage_sensitivity_TFs.pickle\n", - "with open(\"/path/to/dosage_sensitivity_TFs.pickle\", \"rb\") as fp:\n", - " gene_class_dict = pickle.load(fp)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "cd27b15c-52d4-46a6-af8c-812c8731f82c", - "metadata": {}, - "outputs": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, { 
- "name": "stderr", + "name": "stdout", "output_type": "stream", "text": [ - "Hyperparameter tuning is highly recommended for optimal results. No training_args provided; using default hyperparameters.\n" + "\n" ] - } - ], - "source": [ - "cc = Classifier(classifier=\"gene\",\n", - " gene_class_dict = gene_class_dict,\n", - " max_ncells = 10_000,\n", - " freeze_layers = 4,\n", - " num_crossval_splits = 0,\n", - " forward_batch_size=200,\n", - " nproc=16)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "3d542bda-fbab-4d63-ab58-00d4caa996b9", - "metadata": {}, - "outputs": [ + }, { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "7f77eaec105642b199a9e797fccdbf4b", + "model_id": "ceb10f0f87d044ebab534aefef5ec69c", "version_major": 2, "version_minor": 0 }, "text/plain": [ - "Saving the dataset (0/1 shards): 0%| | 0/33558 [00:00\n", - " \n", - " \n", - " [834/834 02:35, Epoch 1/1]\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
StepTraining Loss
830.700600
1660.643100
2490.544700
3320.412900
4150.298600
4980.205700
5810.138900
6640.103200
7470.090000
8300.083100

" - ], + "application/vnd.jupyter.widget-view+json": { + "model_id": "9da6bd7370db44889cab2fb81dcebe11", + "version_major": 2, + "version_minor": 0 + }, "text/plain": [ - "" + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "12bddf69336d481fb0076dced187523c", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b89b616cd8064d248b37cc642a09b9bf", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "9346181e5b8b4f1b9a562ca676f87d38", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "de9f0442fc1e43f8bb06e4cecf719d67", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(FloatProgress(value=0.0, max=200.0), HTML(value='')))" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + 
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAYoAAAEWCAYAAAB42tAoAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAAAt90lEQVR4nO3de1jUdd7/8efAAIKigHLwFJ5XdC0ztVpPSZIWIooaepd1t/qrdHPvXXNTs0w7ubpb3bVbqWvppaWxGVnmIdNKM0tv08JjqykeUEZRTg7nme/vDxQjhBnNmWHw9bgurouZ+cx33vMpvy8+38PnYzIMw0BERKQaPp4uQEREajcFhYiI1EhBISIiNVJQiIhIjRQUIiJSIwWFiIjUSEEhIiI1Mnu6AJHaLDY2lqysLHx9fQkKCqJPnz48/fTT1K9fH4CdO3fyv//7v+zevRsfHx969OjB5MmTadeuXcU2zp8/z6uvvspnn31Gbm4uTZo04Y477mD8+PGEhYV56quJOE0jChEH5s2bx65du1i5ciX79u1jwYIFAOzatYuxY8dy55138tVXX7Fx40Z+85vfMHr0aI4fPw5ASUkJDz74IIcOHWLhwoV89913vPfee4SEhLB7925Pfi0Rp2lEIeKk8PBwevfuzf79+wH429/+RmJiIg8++GBFmz//+c/s3buXf/zjH8ydO5ePPvqIU6dOsWTJkopRSOPGjfnDH/7gke8gcjU0ohBxUmZmJl999RU33HADhYWF7Nq1i0GDBlVpd/fdd7N161YAtm7dSp8+fSpCQsQbaUQh4sDFv/4LCgq47bbb+OMf/0hubi52u53w8PAq7cPDw8nOzgYgJyeHzp07u7VekWtNIwoRB15//XV27drF0qVLOXz4MNnZ2TRs2BAfHx/OnDlTpf2ZM2cIDQ0FICQk5LJtRLyJgkLEST179iQpKYk5c+YQFBRE165dWbduXZV2a9eu5bbbbgPgd7/7HVu2bKGgoMDd5YpcMwoKkSvw4IMPsnXrVvbv38/jjz/OypUrWbJkCefPnyc3N5dXXnmF77//nsceewyAxMREoqKimDhxIj/99BN2u53s7GzmzZvHpk2bPPxtRJyjoBC5AmFhYSQmJvLGG2/QvXt3Fi5cyGeffUafPn3o378/+/fvZ9myZbRq1QoAf39/Fi9eTJs2bfj973/PLbfcwsiRI8nOzubGG2/07JcRcZJJCxeJiEhNNKIQEZEaKShERKRGCgoREamRgkJERGrkdXdm33rrrTRv3tzTZYiIeJWMjAy2bdt2Ve/1uqBo3rw5qampni5DRMSrJCUlXfV7dehJRERqpKAQEZEaKShERKRGCgoREamRgkJERGqkoBARkRq5LCimTZvG7bffzuDBgy/7umEYPP/888TFxZGQkMDevXtdVYqIiPwKLruPIikpifvvv58pU6Zc9vXNmzeTnp7O+vXr+eGHH5g5cybvv/++q8oREfnVLk62bRhgNwzK7NfH5NsuC4oePXpw4sSJal/fuHEjQ4cOxWQy0bVrV/Ly8jh9+jQRERGuKklELjAMA2uJjcISG6U2OyVldvKKSim1GRSX2cjMLcLHZKLMbmC3l+8QbYaBzWbHZsDxcwU0CvSjxGantMxOcZmd/KJSyuxGxU7UZjewG+WfZTN+9rvdID3LSnjDehiGcaHtpdfsF9raDYOSMjuncosIq++Pj6l8B2387Dtc+r3yY2pqR3mNF5pVNLz4fHXv83bdfsV7PXZntsViISoqquJxVFQUFovlskGRkpJCSkoKQMWi9SJSWZnNzsHT5zl61sqJ7EJOZBdy8HQ+9cy+7D+Vh7/Zh/SzBfj7+lBis1+Tz/TzNeHn64Ofrw8NAswEmH3w8THhYwIfk6n8x+dnv194vllIIOcKSrghLAhfkwmTyYTvxXY+5W19L7QtKrMBEBLkjwkwmcCECSp+58Lvl56D8jaXfv/Z86YL766h3eW2z8X3XX
jebkB+USkRwfWuSV+62mf/ufr3eiwoLrde0sX/0L+UnJxMcnIy8OtuQxfxRoZh8NMZK0eyrJwvLqWo1E52QQl7MnJJzyrg+LkCisvsVXb+vj4mmjTwp7DExm+igjEMuKllCP6+PjQNCaSwpIxmIYEE+vnib/bBZjdoFOhHcD0/Avx8CDD7EBzgh6+vCV+TCV+fSz9mHxNB/r7V/puV2uez167+vR4LiqioKDIzMyseZ2Zm6rCTXPcMw+Do2QK2HMpi/6k81u+zcCa/+LJt/c0++PmYiG5cn9ZN6hMeHEBkw3p0bBpMu/AGtAgN1I5crgmPBUVsbCzvvPMO8fHx/PDDDwQHByso5LpSUmZn36k8thw8w6HT5/nhRC5HsqyV2kQ2DKBfh3B6tWtM15ahNGngTz0/X4LrmQmu5+ehyuV647KgmDRpEtu3byc7O5u+ffsyceJEysrKABg9ejT9+vVj06ZNxMXFERgYyIsvvuiqUkRqheIyGzvSs1nyTTqZuUX8cCK30uv+vj60blKfm1uGcFfnKHq2DiOsvr+HqhW5xGVB8fLLL9f4uslk4plnnnHVx4t4jM1ucOj0ebLOF3Miu4AN+0/z/fGcKoeQRt7SghahQcTf2JS24fV1mEhqLa9bj0KkNskrKuXHzHwseUVs3H+aL348TU5BaZV2TRvVI+GmZtzWJowBMZFENvSOK2VEQEEh4pBhGBSW2sjILuTo2QJ2HM0m7UQOP2bmc9ZaUqltgNmHHq1CuSU6jF7tGtMyNIhmIYH4mzVbjngvBYXIz1iLy9iTkcvOYzl8fSiLw2fOk1tYirXEVqVt2/D6DPptFJ2bNaJDZAM6NWtIkL/+SUndo/+r5bpQarOTmVtERk4hBy352OwGp/OLyS4o4YfjuTSoZ+boWSuWvMrnEQL9fOneKpTftW2Crw+0btKAjlHBtAwL8tA3EXE/BYV4pfyiUk5kF5KRXcipvCKOnbXi5+tDUamdg6fzCfL35fi5QvadysNkqn4KhiYN/Akw+3K+uIxOTRsysHMQN98Qwq2tGxPZsB6+PjrBLKKgkFrh4txDWRf+yj97voSTuYVYi23kFJZw0HKes+eLycgpIuv85W9AA2gQYMbP10SpzeDGFo0YcUsLDANaNQ7CZhjc1DKE5iGBhAT60SjIjwCzrxu/pYh3UlCISxiGQU5BKeeLyzhrLeGctZiTOUUcOn2ec9YS7IbBgcx8CktsnC8uI7ew6pVCvxRg9qFDZDC3RIcQHhxA+4hgmjaqR6sm9WnaqJ5uQBNxEQWFXLVSm50tB7PYdSybk7lFlJSVz0G0/cg5isscTzrXuVlDzD4m2oTXp31EMAAdo4Jp3MCfIH8z9QN8uSEsiIb1/PDRISARj1FQiFPO5BdjySsi/ayVbw+fJe1ELmm/uLMYoFPThvRpH46/2UTLsCDaNmmAv9mHiOAAWjWpT2iQP4H+Otwj4k0UFFJJbkEpWw5lsfNYNmfPF3PWWsJXB7OqtPPzNXF7m8YM69acvu3DCQ8O0IlfkTpKQXEdMwyDM/nFbPrPGX44kcO6PZYqJ4oD/Xy5vU1j6vn5MPTm5jQK9KNbdCgNdT5A5LqhoLiO/HTmPFsOZvHZPguHz5znZG5RpdfbNKnPb5s3ZPCNzejboYnXLMgiIq6loKjDDMNg7Z5MPvo+g/9Lz+bchekmgvx9iWxYj56tw+jSvBEDYiLp1KwhjQI1ShCRqhQUdYzNbrDtyFne33GCD3dlAFDf35fb2zahW3QIfdqFE9M0GLOv5h4SEecoKOqIUpudl9b/h3mbfgLAxwQ3tWjE7W2bMCmugyalE5GrpqDwYnlFpXy4M4ONB07z/bFs8orK8Df78FCvVky4o50OJYnINaGg8EIncwpZsPkwy7cfo7jMjp+viZ6tw7jv1mgGdY7SzWkick0pKLyEzW7wwXcnePvrIxzIzAdgQEwkY26PplfbxjrnICIuo6DwAq9uOMhbWw6TV1S+5njPVm
E8flcHbm3T2MOVicj1QEFRi9nsBrNW7WXJN0cBmDO8CyNuaak7oEXErRQUtVBmbhGvfPYfUnYcByA4wMw3T95JgwD95xIR99Oep5b5/ICF3y/eAUCzRvUY0b0lE+5oSz0/TaQnIp6hoKglSm12xr+zkw37LYQE+fHaqJvp2yHc02WJiCgoaoN1e04x8+N9ZOYVcUt0KK+O6kqLUK3JLCK1g4LCg8psdu6d/w07j+UA8MKw33LfrdGeLUpE5BcUFB5yzlpC7EtfklNQSrcbQpg74ibaRTTwdFkiIlUoKDzg/R3HmZa6mzK7wQO3R/Ns4m89XZKISLUUFG42b9NP/HXtAXx9THww/nfcEh3q6ZJERGqkoHCj5z7Zx1tbjhARHMCKR3/HDY11wlpEaj8FhZtM+vf3pO7MIDTIj88m9dPMriLiNRQUbrB+byapOzMIDjCzffoA/DSBn4h4EQWFCxmGwbvbjvHUyj00aeDPmj/2UUiIiNdxaVBs3ryZF154AbvdzsiRI3n44YcrvZ6fn89f/vIXTp48ic1m4/e//z3Dhw93ZUlu8/WhLF7dcJDt6efw9/VhwQPdiWhYz9NliYhcMZcFhc1m49lnn2XRokVERkYyYsQIYmNjadeuXUWbd999l7Zt2zJv3jzOnTvHoEGDSEhIwN/f31VlucX3x3O4b+E2AP5fn9ZMvTtGM76KiNdyWVCkpaURHR1Ny5YtAYiPj2fjxo2VgsJkMmG1WjEMA6vVSqNGjTCbvfto2LvbjjL9wz0AfDD+dm6JDvNwRSIiv47L9soWi4WoqKiKx5GRkaSlpVVqc9999zF+/Hj69OmD1WrllVdewcen6jH8lJQUUlJSAMjOznZVyb/aoq+PMGvVPgAWP9RDISEidYLLgsIwjCrPmUyVD79s2bKFmJgYlixZwrFjx3jooYfo3r07DRpUnsoiOTmZ5ORkAJKSklxV8q9yJr+4IiS2T7+TiGCdjxCRusFll+BERUWRmZlZ8dhisRAREVGpTWpqKnfddRcmk4no6GhatGjB4cOHXVWSSw1/cysAfxtxo0JCROoUlwVFly5dSE9P5/jx45SUlLB69WpiY2MrtWnatCnffPMNAFlZWRw5coQWLVq4qiSXmb12P8fOFTC8WwtGdm/p6XJERK4plx16MpvNzJgxg3HjxmGz2Rg+fDjt27dn+fLlAIwePZoJEyYwbdo0EhISMAyDyZMnExbmXcf1U3eeYP6mw/ibfXgxSZP7iUjd49JLjPr160e/fv0qPTd69OiK3yMjI3n77bddWYJLXZzgLzTIj/cf/R0BZi1XKiJ1j3dfi+pBf//0R/75xSF8fUxsfqI/wfU0d5OI1E0Kiqvw7Kp9vP31EVqEBrLyD70UEiJSpykortDpvCLe/voIYfX9+WLyHZq7SUTqPO3lrkBRqa1iao45w29USIjIdUF7uiswa9U+Dp4+z//c2Z64TpGeLkdExC0UFE46nV/E8u3H8Df78Oe4Dp4uR0TEbRQUTigps/OX98vnqXrzvm4erkZExL0UFE6Y8O5ONv3nDLEdI4jtGOH4DSIidYjTQVFQUODKOmqt93ccZ8N+CzFNG/LWg92rTGwoIlLXOQyKnTt3cs8993DPPfcAcODAAWbOnOnqumqFF9fs5y8r0mhc35+UR25TSIjIdclhUMyePZu33nqLkJAQADp27MiOHTtcXZfHHTp9ngWby2eyTXnkdhrqpjoRuU45deipadOmld90mcWF6prHlu0EYNFDPWgX0cBBaxGRusvhndlNmzZl586dmEwmSkpKWLp0KW3btnVHbR7z55TvOZCZz6geLen/G528FpHrm8OhwcyZM3n33XexWCz069eP/fv388wzz7ijNo84e76YD3dlADDtnhgPVyMi4nkORxRHjhzhpZdeqvTcd999xy233OKyojxpxLzyhZT+9UB3GgXqvISIiMMRxfPPP+/Uc3XBrmPZHMmyknBTM03RISJyQbUjil27dr
Fr1y7OnTvHokWLKp4/f/48NpvNLcW5k7W4jFELvgXgiYG/8XA1IiK1R7VBUVpaSkFBATabDavVWvF8gwYNeO2119xSnDv9KeV7isvs/DWpCy3DgjxdjohIrVFtUPTs2ZOePXsybNgwmjdv7s6a3M5uN/jyx9MEB5gZ1fMGT5cjIlKrODyZHRgYyJw5czh06BDFxcUVzy9ZssSlhbnT0m+PUmoz+EP/1p4uRUSk1nF4Mnvy5Mm0adOGEydO8Nhjj9G8eXO6dOnijtrcwlpcxt8//RGACXe083A1IiK1j8OgyMnJYeTIkZjNZnr27Mns2bP54Ycf3FGbW7y4Zj/5xWW8knwT/ua6f8e5iMiVcnjoyWwubxIREcGXX35JREQEmZmZLi/MXdJO5AIwtGvdPg8jInK1HAbF+PHjyc/PZ8qUKTz33HNYrVaefPJJd9Tmcpa8InZn5PJft96gmWFFRKrhMCj69+8PQHBwMEuXLgXK78z2doZh8PTKPQDc/dsoD1cjIlJ7VRsUNpuNtWvXYrFY6NOnDx06dOCLL75g/vz5FBUVsXLlSjeWee19knaK9fssNKxnpne7Jp4uR0Sk1qo2KKZPn86pU6e48cYbef7552nevDm7du1i8uTJDBgwwJ01usTFtSbW/amvDjuJiNSg2qDYs2cPH3/8MT4+PhQXF3Pbbbexfv16wsPD3VmfS5SU2dmdkUvj+v40Cwn0dDkiIrVatdeD+vn5VSxQFBAQQKtWrepESAD84/ODADzct42HKxERqf2qHVEcPnyYhISEisfHjh2r9HjVqlWurcxFSm12/vH5IQDG9VFQiIg4Um1QrFmzxp11uM2HO8sXJRrUOQpfH52bEBFxpNqgqIsTAdrtBv/6qvwk9pwRN3q4GhER7+DSOSs2b97MwIEDiYuLY8GCBZdts23bNhITE4mPj+f+++93ZTm8s+0oB0+fJ/7Gplq9TkTESQ5vuLtaNpuNZ599lkWLFhEZGcmIESOIjY2lXbtLE+/l5eUxa9YsFi5cSLNmzTh79qyryqHUZmf+pvLRxMv33uSyzxERqWucGlEUFRVx+PDhK9pwWloa0dHRtGzZEn9/f+Lj49m4cWOlNqtWrSIuLo5mzZoB0Lhx4yv6jCsx5q1tZOQUMvmuDgSYfV32OSIidY3DoPj8889JTExk3LhxAOzfv59HH33U4YYtFgtRUZemxoiMjMRisVRqk56eTl5eHmPGjCEpKclld3sfOp3Pt4fP0bRRPf7QX1OJi4hcCYeHnv75z3+yYsUKxowZA0BMTAwZGRkON2wYRpXnfnkHtM1mY+/evSxevJiioiJGjRrFTTfdROvWlRcQSklJISUlBYDs7GyHn/1Lz32yH4DX7+umu7BFRK6Qw6Dw9fUlODj4ijccFRVVaTpyi8VCRERElTahoaEEBQURFBRE9+7dOXDgQJWgSE5OJjk5GYCkpKQrqsMwDL4+lEXHqGC63RB6xd9DROR65/DQU/v27Vm1ahU2m4309HSee+45br75Zocb7tKlC+np6Rw/fpySkhJWr15NbGxspTZ33nknO3bsoKysjMLCQtLS0mjbtu3Vf5vLWLw1nTK7wbCb697lviIi7uBwRPH0008zb948/P39efzxx+nduzcTJkxwvGGzmRkzZjBu3DhsNhvDhw+nffv2LF++HIDRo0fTtm1b+vTpw5AhQ/Dx8WHEiBF06NDh13+rCwzDYNaqfQCM7a31sEVErobJuNzJhJ/Zt28fnTp1clc9DiUlJZGamupU283/OcMDb2+nR6tQ3n/0dy6uTESk9rqSfecvORxRzJ49mzNnzjBo0CDi4+Np3779VX2QJ8zb9BMAC8Z093AlIiLey2FQLF26lDNnzrB27VqefvpprFYrd999t1OHnzxp609ZbP3pLENuakZofX9PlyMi4rWcuuEuPDycBx54gFmzZtGxY0feeOMNV9f1q735Zflo4s9x1+6ch4jI9cjhiOKnn35izZo1fP
rpp4SEhHDPPfcwdepUd9R21X7MzOerg1n0bB1G6yb1PV2OiIhXcxgU06ZNIz4+nrfeeovIyEh31PSrLd56BIDnh/7Ww5WIiHg/h0Hx73//2x11XFPbj5zjphaN6BB55TcKiohIZdUGxf/8z//w6quvVlrV7udq6wp3J7IL+OmMlYd6tfJ0KSIidUK1QTF9+nQA5s2b57ZiroVXN5Svhz28WwsPVyIiUjdUe9XTxXmZli1bRvPmzSv9LFu2zG0FXql9p/IwmeC3zRt5uhQRkTrB4eWxW7durfLc5s2bXVLMtbD3ZB6/0bkJEZFrptpDT8uWLWP58uUcP3680nkKq9VKt27d3FLclSooKQOgXUQDD1ciIlJ3VBsUCQkJ9O3bl5dffpnHH3+84vn69esTEhLijtqu2PYj5wDoHq3pxEVErpVqg8JkMtGiRQtmzJhR5bWcnJxaGRZvbSm/f2JIV00pLiJyrVQbFI8//jjz588nKSkJk8lUacU6k8lUZf1rT1u3J5OvDmZxV6dIwjS3k4jINVNtUMyfPx8oXzPbGyzYXD63k+7GFhG5thxe9fTdd99RUFAAwEcffcTs2bM5efKkywu7UjuP5WAyQUTDep4uRUSkTnEYFDNnziQwMJADBw6wcOFCmjVrxhNPPOGO2px2Or8IgLs6ecdcVCIi3sRhUJjNZkwmExs2bOCBBx7gwQcfxGq1uqM2p63fawHgni5NPVyJiEjd4zAo6tevz/z58/n444+54447sNlslJWVuaM2p5Ta7MxZewCAATEaUYiIXGsOg+KVV17B39+fF198kfDwcCwWC2PHjnVHbU754sBp8ovLeLhvG+oHOJwMV0RErpDDoAgPDychIYH8/Hy++OILAgICGDp0qBtKc873x3MAuP/WaM8WIiJSRzkMijVr1jBy5EjWrVvH2rVrK36vLb46mEU9Px9ahgV6uhQRkTrJ4bGaefPmsWLFCho3bgzAuXPn+O///m8GDRrk8uKcsTsjlz7tm2AymTxdiohIneRwRGEYRkVIAISEhFS6S9uTcgtKAbQutoiICzkcUfTu3ZuxY8cSHx8PlB+K6tu3r8sLc0ZGTiEA7TVbrIiIyzgMiilTprB+/Xq+++47DMMgOTmZuLg4d9Tm0H8s+QC0bqKgEBFxlWqDIj09nTlz5nD8+HE6dOjAlClTiIysXfcpfPHjaQC6aDU7ERGXqfYcxZNPPkn//v157bXX6Ny5M88995w763LK+r0W6vv70ijIz9OliIjUWdWOKKxWK/feey8Abdq0YdiwYW4ryhnW4jIKS230bB3m6VJEROq0aoOiuLiYffv2VVzhVFRUVOlx586d3VNhNbILSgBI7NrMo3WIiNR11QZFeHg4s2fPrnjcpEmTiscmk4klS5a4vroa7DyWA0A9s69H6xARqeuqDYqlS5e6s44rticjF4De7Zt4uBIRkbrN4Q13tVVhiQ2ASC1UJCLiUi4Nis2bNzNw4EDi4uJYsGBBte3S0tKIiYm5ojmkjp4rIMhfh51ERFzNZUFhs9l49tlnWbhwIatXr+aTTz7h0KFDl23397//nd69e1/R9guKyzR1h4iIGzg119NHH33EP//5TwBOnjxJWlqaww2npaURHR1Ny5Yt8ff3Jz4+no0bN1Zpt3TpUgYOHFhpPilnHMmyUs9PIwoREVdzas3s77//ntWrVwPlK97NmjXL4YYtFgtRUVEVjyMjI7FYLFXabNiwgVGjRtW4rZSUFJKSkkhKSiI7OxuAs9YSGgXqRjsREVdzGBRpaWk888wzBAQEANCoUSNKS0sdbvhyM8z+cirwF154gcmTJ+PrW/PIIDk5mdTUVFJTUwkNDaXMZgegRajWoBARcTWHkwKazWZsNlvFTv7cuXP4+Dg+tREVFUVmZmbFY4vFQkRERKU2e/bsYdKkSQBkZ2ezadMmzGYzAwYMqHHbmXlFAITV93dYh4iI/DoOg2LMmDH84Q9/4OzZs7zyyiusW7
eOP/3pTw433KVLF9LT0zl+/DiRkZGsXr2al156qVKbzz//vOL3qVOncscddzgMCYBjZwsAaNVYJ7NFRFzNYVAMGTKEzp078+2332IYBm+88QZt27Z1vGGzmRkzZjBu3DhsNhvDhw+nffv2LF++HIDRo0dfddHZFxYsuqFx0FVvQ0REnOMwKE6ePElgYCD9+/ev9FyzZo7nWOrXrx/9+vWr9Fx1AfHXv/7V4fYu+vHCOhQ6mS0i4noOg+KRRx6p+L24uJgTJ07QunXriqugPGHroSwAWoZqRCEi4moOg2LVqlWVHu/du5eUlBSXFeSM/1jyaRBgxt/stTOQiIh4jSve03bu3Jndu3e7ohan5RWV0UeTAYqIuIXDEcWiRYsqfrfb7ezbt4+wMM8tFnTx9gydnxARcQ+HQWG1Wit+9/X1pV+/fgwcONClRdWkuKz8Zrtu0aEeq0FE5HpSY1DYbDasVitTpkxxVz0O2S8MKSKCAzxciYjI9aHacxRlZWX4+vqyb98+d9bjUMmF6Tv8fXUiW0TEHaodUYwcOZIPP/yQmJgYHn30UQYNGkRQ0KXLUe+66y63FPhLNlv5iKJlmC6NFRFxB4fnKHJzcwkNDWXbtm2VnvdUUBSWlq9spwkBRUTco9qgOHv2LIsWLaJ9+/aYTKZKs8H+chZYd7p46MmTNYiIXE+qDQq73V7piqfawm4YRDbUiWwREXepNijCw8N57LHH3FmLcwzoEBns6SpERK4b1V46dLmFh2qDEpud4HoOT62IiMg1Um1QLF682I1lXAEDsq2OV9gTEZFro9qgCAkJcWMZzjOAmKYNPV2GiMh1w+vuWrMbBgF+Xle2iIjX8so9bnGp3dMliIhcN7wyKJrrZjsREbfxyqDw1b12IiJu451B4aOkEBFxFy8NCq8sW0TEK3nlHlczjIuIuI9X7nJ9NCGgiIjbeGVQFF2YalxERFzPK4OiaSNdHisi4i5eGRS+uj5WRMRtvDIozLo8VkTEbbwyKHQfhYiI+3hlUJh1H4WIiNt45R7XXksXVRIRqYu8MijC6vt7ugQRkeuGVwaFbrgTEXEflwbF5s2bGThwIHFxcSxYsKDK6x9//DEJCQkkJCQwatQoDhw44NR2dTJbRMR9XBYUNpuNZ599loULF7J69Wo++eQTDh06VKlNixYteOedd1i1ahXjx4/n6aefdmrbvhpRiIi4jcuCIi0tjejoaFq2bIm/vz/x8fFs3LixUptu3brRqFEjALp27UpmZqZT29ZFTyIi7mN21YYtFgtRUVEVjyMjI0lLS6u2/YoVK+jbt+9lX0tJSSElJaXisQ49iYi4j8uCwrjMJaymag4Zffvtt6xYsYJly5Zd9vXk5GSSk5MBaH/rnTr0JCLiRi4LiqioqEqHkiwWCxEREVXaHThwgKeeeop//etfhIaGOrXtev6+16xOERGpmcuO9nfp0oX09HSOHz9OSUkJq1evJjY2tlKbkydPMnHiRObOnUvr1q2d3rafTlKIiLiNy0YUZrOZGTNmMG7cOGw2G8OHD6d9+/YsX74cgNGjR/P666+Tk5PDrFmzAPD19SU1NdXhtnXkSUTEfUzG5U4m1GLtb72T3VvWU89Ph59ERJyVlJTk1B/il6NjOCIiUiMFhYiI1Mgrg0LnKERE3Mc7gwIlhYiIu3hlUIiIiPt4ZVDo0JOIiPt4ZVCIiIj7eGVQaEAhIuI+3hkUOvYkIuI2XhkUIiLiPl4ZFBpPiIi4j1cGhYiIuI9XBoVOUYiIuI+XBoWSQkTEXbwyKERExH0UFCIiUiMFhYiI1EhBISIiNVJQiIhIjRQUIiJSI68LCl0YKyLiXl4XFCIi4l4KChERqZH3BYWOPYmIuJX3BYWIiLiVFwaFhhQiIu7khUEhIiLupKAQEZEaeV1Q6MCTiIh7eV1QiIiIeykoRESkRgoKERGpkYJCRERq5NKg2Lx5MwMHDiQuLo4FCxZUed0wDJ5//nni4uJISEhg79
69rixHRESugsuCwmaz8eyzz7Jw4UJWr17NJ598wqFDhyq12bx5M+np6axfv57nnnuOmTNnuqocERG5Si4LirS0NKKjo2nZsiX+/v7Ex8ezcePGSm02btzI0KFDMZlMdO3alby8PE6fPl3jdnV5rIiIe5ldtWGLxUJUVFTF48jISNLS0mpsExUVhcViISIiolK7lJQUUlJSygs+n0lSUpKryvYq2dnZhIaGerqMWkF9cYn64hL1xSVHjhy56ve6LCgMw6jynMlkuuI2AMnJySQnJwOQlJREamrqNarSu6kvLlFfXKK+uER9ccmv+QPbZYeeoqKiyMzMrHh8uZHCL9tkZmZWaSMiIp7lsqDo0qUL6enpHD9+nJKSElavXk1sbGylNrGxsaxcuRLDMPj+++8JDg5WUIiI1DIuO/RkNpuZMWMG48aNw2azMXz4cNq3b8/y5csBGD16NP369WPTpk3ExcURGBjIiy++6HC7Fw9Bifri59QXl6gvLlFfXPJr+sJkXO5EgYiIyAW6M1tERGqkoBARkRrV2qDQ9B+XOOqLjz/+mISEBBISEhg1ahQHDhzwQJXu4agvLkpLSyMmJoZ169a5sTr3cqYvtm3bRmJiIvHx8dx///1urtB9HPVFfn4+jz76KEOGDCE+Pp4PPvjAA1W63rRp07j99tsZPHjwZV+/6v2mUQuVlZUZd955p3Hs2DGjuLjYSEhIMA4ePFipzZdffmmMHTvWsNvtxq5du4wRI0Z4qFrXcqYvvvvuOyMnJ8cwjPJ+uZ774mK7MWPGGOPGjTPWrl3rgUpdz5m+yM3NNe6++24jIyPDMAzDyMrK8kSpLudMX7z55pvG3LlzDcMwjLNnzxo9evQwiouLPVGuS23fvt3Ys2ePER8ff9nXr3a/WStHFK6a/sMbOdMX3bp1o1GjRgB07dq10r0pdYkzfQGwdOlSBg4cSOPGjT1QpXs40xerVq0iLi6OZs2aAdTZ/nCmL0wmE1arFcMwsFqtNGrUCLPZZRd9ekyPHj0q9gWXc7X7zVoZFJeb/sNisdTY5uL0H3WNM33xcytWrKBv377uKM3tnP3/YsOGDYwaNcrd5bmVM32Rnp5OXl4eY8aMISkpiZUrV7q5Svdwpi/uu+8+fvrpJ/r06cOQIUOYPn06Pj61cvfnUle736yVkWpcw+k/vN2VfM9vv/2WFStWsGzZMleX5RHO9MULL7zA5MmT8fX1dVdZHuFMX9hsNvbu3cvixYspKipi1KhR3HTTTbRu3dpdZbqFM32xZcsWYmJiWLJkCceOHeOhhx6ie/fuNGjQwF1l1gpXu9+slUGh6T8ucaYvAA4cOMBTTz3Fv/71rzo7CZozfbFnzx4mTZoElE8It2nTJsxmMwMGDHBrra7m7L+R0NBQgoKCCAoKonv37hw4cKDOBYUzfZGamsrDDz+MyWQiOjqaFi1acPjwYW688UZ3l+tRV7vfrJVjL03/cYkzfXHy5EkmTpzI3Llz69xO4Oec6YvPP/+84mfgwIE888wzdS4kwLm+uPPOO9mxYwdlZWUUFhaSlpZG27ZtPVSx6zjTF02bNuWbb74BICsriyNHjtCiRQtPlOtRV7vfrJUjCldN/+GNnOmL119/nZycHGbNmgWAr69vnZwx05m+uF440xdt27atOCbv4+PDiBEj6NChg4crv/ac6YsJEyYwbdo0EhISMAyDyZMnExYW5uHKr71Jkyaxfft2srOz6du3LxMnTqSsrAz4dftNTeEhIiI1qpWHnkREpPZQUIiISI0UFCIiUiMFhYiI1EhBISIiNVJQSK0UExNDYmJixc+JEyeqbXvzzTf/6s+bOnUqsbGxJCYmMmzYMHbt2nXF25g+fTqHDh0CYN68eZVeu1ZTilzsl8GDB/Poo4+Sl5dXY/v9+/ezadOma/LZcv3S5bFSK918881O76yvpG11pk6dyh133MGgQYPYsmULc+bMYdWqVVe9vWtRk6PtTpkyhVatWjF+/P
hq26emprJnzx5mzJhxzWuR64dGFOIVrFYrDz74IMOGDSMhIYENGzZUaXP69Gnuu+++ir+4d+zYAZTP85OcnMywYcP44x//iNVqrfGzevTowbFjxwBYtGgRgwcPZvDgwSxevBiAgoICHn74YYYMGcLgwYNZs2YNAGPGjGH37t38/e9/p6ioiMTERB5//HHg0qjnT3/6U6W/8KdOncqnn36KzWZjzpw5DB8+nISEBN577z2HfdK1a9eKCd3S0tIYNWoUQ4cOZdSoURw+fJiSkhJee+011qxZQ2JiImvWrKGgoIBp06YxfPhwhg4detl+FKniV01+LuIiHTt2NIYMGWIMGTLEmDBhglFaWmrk5+cbhlG+nsCAAQMMu91uGIZhdO3a1TAMw3jrrbeMN954wzCM8jUK8vPzjbNnzxr/9V//ZVitVsMwDGP+/PnGP/7xjyqfN2XKlIq1K9asWWOMGDHC2L17tzF48GDDarUa58+fN+655x5j7969xrp164zp06dXvDcvL88wDMO4//77jbS0tEo1XXTx8fr1640nnnjCMAzDKC4uNvr27WsUFhYa7733nvH6669XPD9s2DDj2LFjVeq8uJ2ysjJj4sSJxqZNmwzDMIz8/HyjtLTUMAzD+Prrr43HHnvMMAzD+OCDD4xZs2ZVvP+ll14yVq5caRhG+XoVd911V0XfiFSnVk7hIVKvXj0++uijiselpaW8/PLL/N///R8+Pj5YLBaysrIIDw+vaNOlSxeefPJJysrKGDBgADExMXzxxRccOnSoYnqP0tJSunbtetnPnDt3Lm+++SZhYWG88MILfPPNNwwYMICgoCAA4uLi2LFjB3369GHOnDn87W9/o3///nTv3t3p79W3b1+ef/55SkpK2Lx5M927d6devXp8/fXX/Pjjj3z66adA+YpsR48epWXLlpXef3GkkpGRQefOnenVq1dF+ylTpnD06FFMJhOlpaWX/fwtW7bw+eef8/bbbwNQXFzMqVOn6uQcUHLtKCjEK6xatYpz586RmpqKn58fsbGxFBcXV2rTo0cP3nnnHTZt2sQTTzzB2LFjadiwIb169eLll192+BlPPPEEgwYNqni8devWy7Zr3bo1qampbNq0iZdeeolevXrx2GOPOfU9AgIC6NmzJ1999RVr164lPj4eKJ/++amnnqJPnz41vv9igObn5/PII4/w7rvv8sADD/Dqq69y66238vrrr3PixAkeeOCBarfx2muv0aZNG6fqFQGdoxAvkZ+fT+PGjfHz8+Pbb78lIyOjSpuMjAwaN27Mvffey/Dhw9m7dy9du3Zl586dHD16FIDCwkKOHDni1Gf26NGDDRs2UFhYSEFBARs2bKB79+5YLBYCAwNJTExk7Nix7Nu3r8p7zWZztX/Vx8fHk5qayo4dO+jduzcAvXv3Zvny5RXvOXLkCAUFBdXWFhwczFNPPcXbb79NaWkp+fn5REZGAvDhhx9WtKtfv36lczK9e/fmnXfeqViX4HK1i/ySRhTiFRISEhg/fjxJSUnExMRc9i/i7du389Zbb2E2mwkKCmLOnDmEhYUxe/ZsJk2aRElJCVB+QtmZ6dg7d+5MUlISI0eOBGDEiBF06tSJr776irlz5+Lj44PZbGbmzJlV3nvvvfcyZMgQOnXqxEsvvVTptV69ejFlyhRiY2Px9/cHYOTIkWRkZJCUlIRhGISGhvLGG2/UWF+nTp3o2LEjq1evZty4cUydOpVFixZx2223VbS59dZbWbBgAYmJiTzyyCNMmDCBF198kSFDhmAYBs2bN2f+/PkO+0Kub7o8VkREaqRDTyIiUiMFhYiI1EhBISIiNVJQiIhIjRQUIiJSIwWFiIjUSEEhIiI1+v+zGiMYDpa1vQAAAABJRU5ErkJggg==", + "text/plain": [ + "

" ] }, "metadata": {}, "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "[0.24272061700106187, 0.1890124629743475, 0.1665455764824233, 0.212820656122506, 0.18890068741966132]\n" + ] } ], "source": [ - "# 6 layer Geneformer: https://huggingface.co/ctheodoris/Geneformer/blob/main/model.safetensors\n", - "trainer_test = cc.train_all_data(model_directory=\"/path/to/Geneformer\",\n", - " prepared_input_data_file=f\"{output_dir}/{output_prefix}_labeled.dataset\",\n", - " id_class_dict_file=f\"{output_dir}/{output_prefix}_id_class_dict.pkl\",\n", - " output_directory=output_dir,\n", - " output_prefix=output_prefix)" + "# cross-validate gene classifier\n", + "all_roc_auc, roc_auc, roc_auc_sd, mean_fpr, mean_tpr, confusion, label_dicts \\\n", + " = cross_validate(subsampled_train_dataset, targets, labels, nsplits, subsample_size, training_args, freeze_layers, training_output_dir, 1)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "# bundle data for plotting\n", + "bundled_data = []\n", + "bundled_data += [(roc_auc, roc_auc_sd, mean_fpr, mean_tpr, \"Geneformer\", \"red\")]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# plot ROC curve\n", + "plot_ROC(bundled_data, 'Dosage Sensitive vs Insensitive TFs')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# plot confusion matrix\n", + "classes_list = [\"Dosage Sensitive\", \"Dosage Insensitive\"]\n", + "plot_confusion_matrix(classes_list, confusion, \"Geneformer\")" ] } ], @@ -1243,9 +2424,14 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.15" + "version": "3.10.11" + }, + "vscode": { + "interpreter": { + "hash": "eba1599a1f7e611c14c87ccff6793920aa63510b01fc0e229d6dd014149b8829" + } } }, "nbformat": 4, - 
"nbformat_minor": 5 + "nbformat_minor": 4 } diff --git a/examples/hyperparam_optimiz_for_disease_classifier.py b/examples/hyperparam_optimiz_for_disease_classifier.py new file mode 100644 index 0000000000000000000000000000000000000000..f1696deb777b398fd1a539c7b324e2a98cb3c7c6 --- /dev/null +++ b/examples/hyperparam_optimiz_for_disease_classifier.py @@ -0,0 +1,226 @@ +#!/usr/bin/env python +# coding: utf-8 + +# hyperparameter optimization with raytune for disease classification + +# imports +import os +import subprocess +GPU_NUMBER = [0,1,2,3] +os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(s) for s in GPU_NUMBER]) +os.environ["NCCL_DEBUG"] = "INFO" +os.environ["CONDA_OVERRIDE_GLIBC"] = "2.56" +os.environ["LD_LIBRARY_PATH"] = "/path/to/miniconda3/lib:/path/to/sw/lib:/path/to/sw/lib" + +# initiate runtime environment for raytune +import pyarrow # must occur prior to ray import +import ray +from ray import tune +from ray.tune import ExperimentAnalysis +from ray.tune.suggest.hyperopt import HyperOptSearch +ray.shutdown() #engage new ray session +runtime_env = {"conda": "base", + "env_vars": {"LD_LIBRARY_PATH": "/path/to/miniconda3/lib:/path/to/sw/lib:/path/to/sw/lib"}} +ray.init(runtime_env=runtime_env) + +def initialize_ray_with_check(ip_address): + """ + Initialize Ray with a specified IP address and check its status and accessibility. + + Args: + - ip_address (str): The IP address (with port) to initialize Ray. + + Returns: + - bool: True if initialization was successful and dashboard is accessible, False otherwise. 
+ """ + try: + ray.init(address=ip_address) + print(ray.nodes()) + + services = ray.get_webui_url() + if not services: + raise RuntimeError("Ray dashboard is not accessible.") + else: + print(f"Ray dashboard is accessible at: {services}") + return True + except Exception as e: + print(f"Error initializing Ray: {e}") + return False + +# Usage: +ip = 'your_ip:xxxx' # Replace with your actual IP address and port +if initialize_ray_with_check(ip): + print("Ray initialized successfully.") +else: + print("Error during Ray initialization.") + +import datetime +import numpy as np +import pandas as pd +import random +import seaborn as sns; sns.set() +from collections import Counter +from datasets import load_from_disk +from scipy.stats import ranksums +from sklearn.metrics import accuracy_score +from transformers import BertForSequenceClassification +from transformers import Trainer +from transformers.training_args import TrainingArguments + +from geneformer import DataCollatorForCellClassification + +# number of CPU cores +num_proc=30 + +# load train dataset with columns: + # cell_type (annotation of each cell's type) + # disease (healthy or disease state) + # individual (unique ID for each patient) + # length (length of that cell's rank value encoding) +train_dataset=load_from_disk("/path/to/disease_train_data.dataset") + +# filter dataset for given cell_type +def if_cell_type(example): + return example["cell_type"].startswith("Cardiomyocyte") + +trainset_v2 = train_dataset.filter(if_cell_type, num_proc=num_proc) + +# create dictionary of disease states : label ids +target_names = ["healthy", "disease1", "disease2"] +target_name_id_dict = dict(zip(target_names,[i for i in range(len(target_names))])) + +trainset_v3 = trainset_v2.rename_column("disease","label") + +# change labels to numerical ids +def classes_to_ids(example): + example["label"] = target_name_id_dict[example["label"]] + return example + +trainset_v4 = trainset_v3.map(classes_to_ids, num_proc=num_proc) + +# 
separate into train, validation, test sets +indiv_set = set(trainset_v4["individual"]) +random.seed(42) +train_indiv = random.sample(indiv_set,round(0.7*len(indiv_set))) +eval_indiv = [indiv for indiv in indiv_set if indiv not in train_indiv] +valid_indiv = random.sample(eval_indiv,round(0.5*len(eval_indiv))) +test_indiv = [indiv for indiv in eval_indiv if indiv not in valid_indiv] + +def if_train(example): + return example["individual"] in train_indiv + +classifier_trainset = trainset_v4.filter(if_train,num_proc=num_proc).shuffle(seed=42) + +def if_valid(example): + return example["individual"] in valid_indiv + +classifier_validset = trainset_v4.filter(if_valid,num_proc=num_proc).shuffle(seed=42) + +# define output directory path +current_date = datetime.datetime.now() +datestamp = f"{str(current_date.year)[-2:]}{current_date.month:02d}{current_date.day:02d}" +output_dir = f"/path/to/models/{datestamp}_geneformer_DiseaseClassifier/" + +# ensure not overwriting previously saved model +saved_model_test = os.path.join(output_dir, f"pytorch_model.bin") +if os.path.isfile(saved_model_test) == True: + raise Exception("Model already saved to this directory.") + +# make output directory +subprocess.call(f'mkdir {output_dir}', shell=True) + +# set training parameters +# how many pretrained layers to freeze +freeze_layers = 2 +# batch size for training and eval +geneformer_batch_size = 12 +# number of epochs +epochs = 1 +# logging steps +logging_steps = round(len(classifier_trainset)/geneformer_batch_size/10) + +# define function to initiate model +def model_init(): + model = BertForSequenceClassification.from_pretrained("/path/to/pretrained_model/", + num_labels=len(target_names), + output_attentions = False, + output_hidden_states = False) + if freeze_layers is not None: + modules_to_freeze = model.bert.encoder.layer[:freeze_layers] + for module in modules_to_freeze: + for param in module.parameters(): + param.requires_grad = False + + model = model.to("cuda:0") + return 
model + +# define metrics +# note: macro f1 score recommended for imbalanced multiclass classifiers +def compute_metrics(pred): + labels = pred.label_ids + preds = pred.predictions.argmax(-1) + # calculate accuracy using sklearn's function + acc = accuracy_score(labels, preds) + return { + 'accuracy': acc, + } + +# set training arguments +training_args = { + "do_train": True, + "do_eval": True, + "evaluation_strategy": "steps", + "eval_steps": logging_steps, + "logging_steps": logging_steps, + "group_by_length": True, + "length_column_name": "length", + "disable_tqdm": True, + "skip_memory_metrics": True, # memory tracker causes errors in raytune + "per_device_train_batch_size": geneformer_batch_size, + "per_device_eval_batch_size": geneformer_batch_size, + "num_train_epochs": epochs, + "load_best_model_at_end": True, + "output_dir": output_dir, +} + +training_args_init = TrainingArguments(**training_args) + +# create the trainer +trainer = Trainer( + model_init=model_init, + args=training_args_init, + data_collator=DataCollatorForCellClassification(), + train_dataset=classifier_trainset, + eval_dataset=classifier_validset, + compute_metrics=compute_metrics, +) + +# specify raytune hyperparameter search space +ray_config = { + "num_train_epochs": tune.choice([epochs]), + "learning_rate": tune.loguniform(1e-6, 1e-3), + "weight_decay": tune.uniform(0.0, 0.3), + "lr_scheduler_type": tune.choice(["linear","cosine","polynomial"]), + "warmup_steps": tune.uniform(100, 2000), + "seed": tune.uniform(0,100), + "per_device_train_batch_size": tune.choice([geneformer_batch_size]) +} + +hyperopt_search = HyperOptSearch( + metric="eval_accuracy", mode="max") + +# optimize hyperparameters +trainer.hyperparameter_search( + direction="maximize", + backend="ray", + resources_per_trial={"cpu":8,"gpu":1}, + hp_space=lambda _: ray_config, + search_alg=hyperopt_search, + n_trials=100, # number of trials + progress_reporter=tune.CLIReporter(max_report_frequency=600, + sort_by_metric=True, 
+ max_progress_rows=100, + mode="max", + metric="eval_accuracy", + metric_columns=["loss", "eval_loss", "eval_accuracy"]) +) \ No newline at end of file diff --git a/examples/in_silico_perturbation.ipynb b/examples/in_silico_perturbation.ipynb index f7102617ebd36956d07ba61f8e4bccdf0719515e..8d598cdaec598325681a3a74cf87930d1422dca6 100644 --- a/examples/in_silico_perturbation.ipynb +++ b/examples/in_silico_perturbation.ipynb @@ -8,80 +8,35 @@ "outputs": [], "source": [ "from geneformer import InSilicoPerturber\n", - "from geneformer import InSilicoPerturberStats\n", - "from geneformer import EmbExtractor" - ] - }, - { - "cell_type": "markdown", - "id": "cbd6851c-060e-4967-b816-e605ffe58b23", - "metadata": { - "tags": [] - }, - "source": [ - "### in silico perturbation in deletion mode to determine genes whose deletion in the dilated cardiomyopathy (dcm) state significantly shifts the embedding towards non-failing (nf) state" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c53e98cd-c603-4878-82ba-db471181bb55", - "metadata": {}, - "outputs": [], - "source": [ - "# first obtain start, goal, and alt embedding positions\n", - "# this function was changed to be separate from perturb_data\n", - "# to avoid repeating calcuations when parallelizing perturb_data\n", - "cell_states_to_model={\"state_key\": \"disease\", \n", - " \"start_state\": \"dcm\", \n", - " \"goal_state\": \"nf\", \n", - " \"alt_states\": [\"hcm\"]}\n", - "\n", - "filter_data_dict={\"cell_type\":[\"Cardiomyocyte1\",\"Cardiomyocyte2\",\"Cardiomyocyte3\"]}\n", - "\n", - "# OF NOTE: token_dictionary_file must be set to the gc-30M token dictionary if using a 30M series model\n", - "# (otherwise the EmbExtractor will use the current default model dictionary)\n", - "# 30M token dictionary: https://huggingface.co/ctheodoris/Geneformer/blob/main/geneformer/gene_dictionaries_30m/token_dictionary_gc30M.pkl\n", - "embex = EmbExtractor(model_type=\"CellClassifier\", # if using previously 
fine-tuned cell classifier model\n", - " num_classes=3,\n", - " filter_data=filter_data_dict,\n", - " max_ncells=1000,\n", - " emb_layer=0,\n", - " summary_stat=\"exact_mean\",\n", - " forward_batch_size=256,\n", - " nproc=16)\n", - "\n", - "state_embs_dict = embex.get_state_embs(cell_states_to_model,\n", - " \"../fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224\", # example 30M fine-tuned model\n", - " \"path/to/input_data\",\n", - " \"path/to/output_directory\",\n", - " \"output_prefix\")" + "from geneformer import InSilicoPerturberStats" ] }, { "cell_type": "code", "execution_count": null, - "id": "981e1190-62da-4543-b7d3-6e2a2d6a6d56", + "id": "67b44366-f255-4415-a865-6a27a8ffcce7", "metadata": { "tags": [] }, "outputs": [], "source": [ - "# OF NOTE: token_dictionary_file must be set to the gc-30M token dictionary if using a 30M series model\n", - "# (otherwise the InSilicoPerturber will use the current default model dictionary)\n", - "# 30M token dictionary: https://huggingface.co/ctheodoris/Geneformer/blob/main/geneformer/gene_dictionaries_30m/token_dictionary_gc30M.pkl\n", + "# in silico perturbation in deletion mode to determine genes whose \n", + "# deletion in the dilated cardiomyopathy (dcm) state significantly shifts\n", + "# the embedding towards non-failing (nf) state\n", "isp = InSilicoPerturber(perturb_type=\"delete\",\n", " perturb_rank_shift=None,\n", " genes_to_perturb=\"all\",\n", " combos=0,\n", " anchor_gene=None,\n", - " model_type=\"CellClassifier\", # if using previously fine-tuned cell classifier model\n", + " model_type=\"CellClassifier\",\n", " num_classes=3,\n", " emb_mode=\"cell\",\n", " cell_emb_style=\"mean_pool\",\n", - " filter_data=filter_data_dict,\n", - " cell_states_to_model=cell_states_to_model,\n", - " state_embs_dict=state_embs_dict,\n", + " filter_data={\"cell_type\":[\"Cardiomyocyte1\",\"Cardiomyocyte2\",\"Cardiomyocyte3\"]},\n", + " cell_states_to_model={'state_key': 'disease', \n", + " 
'start_state': 'dcm', \n", + " 'goal_state': 'nf', \n", + " 'alt_states': ['hcm']},\n", " max_ncells=2000,\n", " emb_layer=0,\n", " forward_batch_size=400,\n", @@ -96,10 +51,9 @@ "outputs": [], "source": [ "# outputs intermediate files from in silico perturbation\n", - "\n", - "isp.perturb_data(\"../fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224\", # example 30M fine-tuned model\n", + "isp.perturb_data(\"path/to/model\",\n", " \"path/to/input_data\",\n", - " \"path/to/isp_output_directory\",\n", + " \"path/to/output_directory\",\n", " \"output_prefix\")" ] }, @@ -110,14 +64,11 @@ "metadata": {}, "outputs": [], "source": [ - "# OF NOTE: token_dictionary_file must be set to the gc-30M token dictionary if using a 30M series model\n", - "# (otherwise the InSilicoPerturberStats will use the current default model dictionary)\n", - "# 30M token dictionary: https://huggingface.co/ctheodoris/Geneformer/blob/main/geneformer/gene_dictionaries_30m/token_dictionary_gc30M.pkl\n", "ispstats = InSilicoPerturberStats(mode=\"goal_state_shift\",\n", " genes_perturbed=\"all\",\n", " combos=0,\n", " anchor_gene=None,\n", - " cell_states_to_model=cell_states_to_model)" + " cell_states_to_model={\"disease\":([\"dcm\"],[\"nf\"],[\"hcm\"])})" ] }, { @@ -128,9 +79,9 @@ "outputs": [], "source": [ "# extracts data from intermediate files and processes stats to output in final .csv\n", - "ispstats.get_stats(\"path/to/isp_output_directory\", # this should be the directory \n", + "ispstats.get_stats(\"path/to/input_data\",\n", " None,\n", - " \"path/to/isp_stats_output_directory\",\n", + " \"path/to/output_directory\",\n", " \"output_prefix\")" ] } @@ -151,7 +102,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.15" + "version": "3.10.11" } }, "nbformat": 4, diff --git a/examples/multitask_cell_classification.ipynb b/examples/multitask_cell_classification.ipynb deleted file mode 100644 index 
b3f13b7477c7fb8797bf871b90f943877fb61029..0000000000000000000000000000000000000000 --- a/examples/multitask_cell_classification.ipynb +++ /dev/null @@ -1,420 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "866f100c-e11a-4e7b-a37c-831775d845a7", - "metadata": {}, - "source": [ - "# Geneformer Multi-Task Cell Classifier Tutorial\n", - "\n", - "This tutorial demonstrates how to use the Geneformer Multi-Task Cell Classifier and optimizatize hyperparameter for fine-tuning" - ] - }, - { - "cell_type": "markdown", - "id": "311ba456-b44d-40c7-941d-3fc03bcda85a", - "metadata": {}, - "source": [ - "## 1. Installation and Imports\n", - "\n", - "First import the necessary modules." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "cd9defdc-0524-4c3b-a741-27117ed3a5be", - "metadata": {}, - "outputs": [], - "source": [ - "from geneformer import MTLClassifier" - ] - }, - { - "cell_type": "markdown", - "id": "790e9c3c-f6d9-44b3-b9a5-05725760f4fd", - "metadata": {}, - "source": [ - "## 2. Set up Paths and Parameters\n", - "\n", - "Now, let's set up the necessary paths and parameters for our classifier. We'll also define our task columns, which are specific columns from our dataset that represent the classification tasks we want to train the model on." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "04a04197-8e45-47f8-a86f-202209ea10ae", - "metadata": {}, - "outputs": [], - "source": [ - "# Define paths\n", - "pretrained_path = \"/path/to/pretrained/Geneformer/model\" \n", - "# input data is tokenized rank value encodings generated by Geneformer tokenizer (see tokenizing_scRNAseq_data.ipynb)\n", - "train_path = \"/path/to/train/data.dataset\"\n", - "val_path = \"/path/to/val/data.dataset\"\n", - "test_path = \"/path/to/test/data.dataset\"\n", - "results_dir = \"/path/to/results/directory\"\n", - "model_save_path = \"/path/to/model/save/path\"\n", - "tensorboard_log_dir = \"/path/to/tensorboard/log/dir\"\n", - "\n", - "# Define tasks and hyperparameters\n", - "# task_columns should be a list of column names from your dataset\n", - "# Each column represents a specific classification task (e.g. cell type, disease state)\n", - "task_columns = [\"cell_type\", \"disease_state\"] # Example task columns\n", - "\n", - "hyperparameters = {\n", - " \"learning_rate\": {\"type\": \"float\", \"low\": 1e-5, \"high\": 1e-3, \"log\": True},\n", - " \"warmup_ratio\": {\"type\": \"float\", \"low\": 0.005, \"high\": 0.01},\n", - " \"weight_decay\": {\"type\": \"float\", \"low\": 0.01, \"high\": 0.1},\n", - " \"dropout_rate\": {\"type\": \"float\", \"low\": 0.0, \"high\": 0.7},\n", - " \"lr_scheduler_type\": {\"type\": \"categorical\", \"choices\": [\"cosine\"]},\n", - " \"task_weights\": {\"type\": \"float\", \"low\": 0.1, \"high\": 2.0}\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "31857690-a739-435a-aefd-f171fafc1b78", - "metadata": {}, - "source": [ - "In the code above, we've defined `task_columns` as `[\"cell_type\", \"disease_state\"]`. This means our model will be trained to classify cells based on two tasks:\n", - "1. Identifying the cell type\n", - "2. Determining the disease state\n", - "3. 
Note: \"unique_cell_id\" is a required column in the dataset for logging and inference purposes\n", - "\n", - "These column names should correspond to actual columns in your dataset. Each column should contain the labels for that specific classification task.\n", - "\n", - "For example, your dataset might look something like this:\n", - "\n", - " | unique_cell_id | input_ids | ... | cell_type | disease_state |\n", - " |----------------|-----------|-----|-----------|---------------|\n", - " | cell1 | ... | ... | neuron | healthy |\n", - " | cell2 | ... | ... | astrocyte | diseased |\n", - " | ... | ... | ... | ... | ... |\n", - "The model will learn to predict classes within 'cell_type' and 'disease_state' " - ] - }, - { - "cell_type": "markdown", - "id": "b9e3050a-6162-4c01-b6fd-8784bf4ab1e4", - "metadata": {}, - "source": [ - "## 3. Initialize the MTLClassifier\n", - "\n", - "Now, let's create an instance of the MTLClassifier with our defined parameters and task columns." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e27caac9-670c-409d-9313-50201c665cb9", - "metadata": {}, - "outputs": [], - "source": [ - "mc = MTLClassifier(\n", - " task_columns=task_columns, # Our defined classification tasks\n", - " study_name=\"MTLClassifier_example\",\n", - " pretrained_path=pretrained_path,\n", - " train_path=train_path,\n", - " val_path=val_path,\n", - " test_path=test_path,\n", - " model_save_path=model_save_path,\n", - " results_dir=results_dir,\n", - " tensorboard_log_dir=tensorboard_log_dir,\n", - " hyperparameters=hyperparameters,\n", - " n_trials=15, # Number of trials for hyperparameter optimization (at least 50 suggested)\n", - " epochs=1, # Number of training epochs (1 suggested to prevent overfitting)\n", - " batch_size=8, # Adjust based on available GPU memory\n", - " seed=42\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "0d729444-e3ad-4584-9659-0c464ac97462", - "metadata": {}, - "source": [ - "## 4. 
Run Hyperparameter Optimization\n", - "\n", - "Now, let's run the Optuna study to optimize our hyperparameters for both classification tasks." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9298aa3e-6a52-4aa8-b9ff-b63d97beac93", - "metadata": {}, - "outputs": [], - "source": [ - "mc.run_optuna_study()" - ] - }, - { - "cell_type": "markdown", - "id": "af23075d-d07b-43d3-bc5d-4df4d5d7199b", - "metadata": {}, - "source": [ - "## 5. Evaluate the Model on Test Data\n", - "\n", - "After optimization, we can evaluate our model on the test dataset. This will provide performance metrics for both classification tasks. CSV containing following keys will be generated in specified results directiory \"Cell ID, task(1...n) True,task(1.,.n) Pred,task(1...n) Probabilities\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "461bf8d3-b964-4ff4-994f-9f3d313d4614", - "metadata": {}, - "outputs": [], - "source": [ - "mc.load_and_evaluate_test_model()" - ] - }, - { - "cell_type": "markdown", - "id": "31cfeb2d-6673-4b02-a79c-2533cc5e4d28", - "metadata": {}, - "source": [ - "## 6. 
(Optional) Manual Hyperparameter Tuning\n", - "\n", - "If you prefer to set hyperparameters manually, you can use the following approach:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8ee6b99f-42e9-4abf-a292-aa9047735e0e", - "metadata": {}, - "outputs": [], - "source": [ - "manual_hyperparameters = {\n", - " \"learning_rate\": 0.001,\n", - " \"warmup_ratio\": 0.01,\n", - " \"weight_decay\": 0.1,\n", - " \"dropout_rate\": 0.1,\n", - " \"lr_scheduler_type\": \"cosine\",\n", - " \"task_weights\": [1, 1], # Weights for each task (cell_type, disease_state)\n", - " \"max_layers_to_freeze\": 2\n", - "}\n", - "\n", - "mc_manual = MTLClassifier(\n", - " task_columns=task_columns,\n", - " study_name=\"mtl_manual\",\n", - " pretrained_path=pretrained_path,\n", - " train_path=train_path,\n", - " val_path=val_path,\n", - " test_path=test_path,\n", - " model_save_path=model_save_path,\n", - " results_dir=results_dir,\n", - " tensorboard_log_dir=tensorboard_log_dir,\n", - " manual_hyperparameters=manual_hyperparameters,\n", - " use_manual_hyperparameters=True,\n", - " epochs=10,\n", - " batch_size=32,\n", - " seed=42\n", - ")\n", - "\n", - "mc_manual.run_manual_tuning()" - ] - }, - { - "cell_type": "markdown", - "id": "dbaac008-fc00-4b71-8e78-89b2d922d9d8", - "metadata": {}, - "source": [ - "# Geneformer In Silico Perturber Tutorial (MTL Quantized)\n", - "This demonstrates how to use the Geneformer In Silico Perturber with a Multi-Task Learning (MTL) model in a quantized configuration to optimize runtime and memory." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2e15ad57-736c-48f0-be87-39cf5015bc5c", - "metadata": {}, - "outputs": [], - "source": [ - "from geneformer import InSilicoPerturber, EmbExtractor, InSilicoPerturberStats" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "43c18140-151e-4d44-95b4-a9b3a47172cf", - "metadata": {}, - "outputs": [], - "source": [ - "# Define paths\n", - "model_directory = \"/path/to/model/save/path\"\n", - "input_data_file = \"/path/to/input/data.dataset\"\n", - "output_directory = \"/path/to/output/directory\"\n", - "output_prefix = \"mtl_quantized_perturbation\"\n", - "\n", - "# Define parameters\n", - "perturb_type = \"delete\" # or \"overexpress\"\n", - "\n", - "# Define cell states to model\n", - "cell_states_to_model = {\n", - " \"state_key\": \"disease_state\", \n", - " \"start_state\": \"disease\", \n", - " \"goal_state\": \"control\"\n", - "}\n", - "\n", - "# Define filter data\n", - "filter_data_dict = {\n", - " \"cell_type\": [\"Fibroblast\"]\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "3010d0bf-b23c-45c1-ac12-8c472dc8b7a1", - "metadata": {}, - "source": [ - "## 3. Extract State Embeddings\n", - "\n", - "Before we initialize the InSilicoPerturber, we need to extract the state embeddings using the EmbExtractor." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "215f0a90-8041-417d-a5d3-b2483626c3b2", - "metadata": {}, - "outputs": [], - "source": [ - "# Initialize EmbExtractor\n", - "embex = EmbExtractor(\n", - " filter_data_dict=filter_data_dict,\n", - " max_ncells=1000, # Number of cells to extract embeddings for\n", - " emb_layer=0, # Use the second to last layer\n", - " emb_mode = \"cls\",\n", - " summary_stat=\"exact_mean\",\n", - " forward_batch_size=8, # Adjust based on available GPU memory\n", - " nproc=4\n", - ")\n", - "\n", - "# Extract state embeddings\n", - "state_embs_dict = embex.get_state_embs(\n", - " cell_states_to_model,\n", - " model_directory=model_directory,\n", - " input_data_file=input_data_file,\n", - " output_directory=output_directory,\n", - " output_prefix=output_prefix\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "23f14e36-4529-4fb2-8af9-7f4875cf81e3", - "metadata": {}, - "source": [ - "## 4. Initialize the InSilicoPerturber\n", - "\n", - "Now that we have our state embeddings, let's create an instance of the InSilicoPerturber with MTL and quantized configurations." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "09f985a1-91bc-4e8d-8001-a3663531b570", - "metadata": {}, - "outputs": [], - "source": [ - "# Initialize InSilicoPerturber\n", - "isp = InSilicoPerturber(\n", - " perturb_type=perturb_type,\n", - " genes_to_perturb=\"all\", # Perturb all genes\n", - " model_type=\"MTLCellClassifier-Quantized\", # Use quantized MTL model\n", - " emb_mode=\"cls\", # Use CLS token embedding\n", - " cell_states_to_model=cell_states_to_model,\n", - " state_embs_dict=state_embs_dict,\n", - " max_ncells=1000, # Number of cells to perturb (larger number increases power)\n", - " emb_layer=0, \n", - " forward_batch_size=8, # Adjust based on available GPU memory\n", - " nproc=1\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "cfcc2c1e-fd7f-4a36-99fc-ac7f43e5be6b", - "metadata": {}, - "source": [ - "## 5. Run In Silico Perturbation\n", - "\n", - "Run the in silico perturbation on the dataset." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cf030c09-8ae4-45a7-aaf7-3fc2af4fe296", - "metadata": {}, - "outputs": [], - "source": [ - "# Run perturbation and output intermediate files\n", - "isp.perturb_data(\n", - " model_directory=model_directory,\n", - " input_data_file=input_data_file,\n", - " output_directory=output_directory,\n", - " output_prefix=output_prefix\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "bb8ec074-6f2f-422b-a973-37ed32a15c38", - "metadata": {}, - "source": [ - "## 6. Process Results with InSilicoPerturberStats\n", - "\n", - "After running the perturbation, we'll use InSilicoPerturberStats to process the intermediate files and generate the final statistics." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0a748043-43fc-47ad-ace5-f0ae3dd34674", - "metadata": {}, - "outputs": [], - "source": [ - "# Initialize InSilicoPerturberStats\n", - "ispstats = InSilicoPerturberStats(\n", - " mode=\"goal_state_shift\",\n", - " genes_perturbed=\"all\",\n", - " combos=0,\n", - " anchor_gene=None,\n", - " cell_states_to_model=cell_states_to_model\n", - ")\n", - "\n", - "# Process stats and output final .csv\n", - "ispstats.get_stats(\n", - " input_data_file,\n", - " None,\n", - " output_directory,\n", - " output_prefix\n", - ")" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/examples/pretraining_new_model/pretrain_geneformer_w_deepspeed.py b/examples/pretraining_new_model/pretrain_geneformer_w_deepspeed.py index 205fb9624ee76d6c0e8c727a8014c8544fd30584..f6b2c84eecfd5814ac5887e749635605f8ece2c4 100644 --- a/examples/pretraining_new_model/pretrain_geneformer_w_deepspeed.py +++ b/examples/pretraining_new_model/pretrain_geneformer_w_deepspeed.py @@ -138,9 +138,7 @@ training_args = { "per_device_train_batch_size": geneformer_batch_size, "num_train_epochs": epochs, "save_strategy": "steps", - "save_steps": np.floor( - num_examples / geneformer_batch_size / 8 - ), # 8 saves per epoch + "save_steps": np.floor(num_examples / geneformer_batch_size / 8), # 8 saves per epoch "logging_steps": 1000, "output_dir": training_output_dir, "logging_dir": logging_dir, diff --git a/examples/tokenizing_scRNAseq_data.ipynb b/examples/tokenizing_scRNAseq_data.ipynb index 
58c629a166529b066ba3615c16a26e59dd46295f..52776a39d8ebb7076798c5e171f41464c902d9d6 100644 --- a/examples/tokenizing_scRNAseq_data.ipynb +++ b/examples/tokenizing_scRNAseq_data.ipynb @@ -7,39 +7,23 @@ "tags": [] }, "source": [ - "## Tokenizing .loom or .h5ad single cell RNA-seq data to rank value encoding .dataset format" + "## Tokenizing .loom single cell RNA-seq data to rank value encoding .dataset format" ] }, { "cell_type": "markdown", - "id": "1fe86f48-5578-47df-b373-58c21ec170ab", + "id": "350e6252-b783-494b-9767-f087eb868a15", "metadata": {}, "source": [ - "#### Input data is a directory with .loom or .h5ad files containing raw counts from single cell RNAseq data, including all genes detected in the transcriptome without feature selection. The input file type is specified by the argument file_format in the tokenize_data function.\n", + "#### Input data is a directory with .loom files containing raw counts from single cell RNAseq data, including all genes detected in the transcriptome without feature selection. \n", "\n", - "#### The discussion below references the .loom file format, but the analagous labels are required for .h5ad files, just that they will be column instead of row attributes and vice versa due to the transposed format of the two file types.\n", - "\n", - "#### Genes should be labeled with Ensembl IDs (loom row attribute \"ensembl_id\"), which provide a unique identifer for conversion to tokens. Other forms of gene annotations (e.g. gene names) can be converted to Ensembl IDs via Ensembl Biomart. Cells should be labeled with the total read count in the cell (loom column attribute \"n_counts\") to be used for normalization.\n", + "#### Genes should be labeled with Ensembl IDs (row attribute \"ensembl_id\"), which provide a unique identifer for conversion to tokens. Other forms of gene annotations (e.g. gene names) can be converted to Ensembl IDs via Ensembl Biomart. 
Cells should be labeled with the total read count in the cell (column attribute \"n_counts\") to be used for normalization.\n", "\n", "#### No cell metadata is required, but custom cell attributes may be passed onto the tokenized dataset by providing a dictionary of custom attributes to be added, which is formatted as loom_col_attr_name : desired_dataset_col_attr_name. For example, if the original .loom dataset has column attributes \"cell_type\" and \"organ_major\" and one would like to retain these attributes as labels in the tokenized dataset with the new names \"cell_type\" and \"organ\", respectively, the following custom attribute dictionary should be provided: {\"cell_type\": \"cell_type\", \"organ_major\": \"organ\"}. \n", "\n", "#### Additionally, if the original .loom file contains a cell column attribute called \"filter_pass\", this column will be used as a binary indicator of whether to include these cells in the tokenized data. All cells with \"1\" in this attribute will be tokenized, whereas the others will be excluded. One may use this column to indicate QC filtering or other criteria for selection for inclusion in the final tokenized dataset.\n", "\n", - "#### If one's data is in other formats besides .loom or .h5ad, one can use the relevant tools (such as Anndata tools) to convert the file to a .loom or .h5ad format prior to running the transcriptome tokenizer." 
- ] - }, - { - "cell_type": "markdown", - "id": "32c69493-4e5a-4b07-8dc1-958ff2ee7d0b", - "metadata": {}, - "source": [ - "**********************************************************************************************************\n", - "#### OF NOTE: PLEASE ENSURE THE CORRECT TOKEN DICTIONARY AND GENE MEDIAN FILE IS USED FOR THE CORRECT MODEL.\n", - "#### 95M: current defaults; 30M: https://huggingface.co/ctheodoris/Geneformer/tree/main/geneformer/gene_dictionaries_30m\n", - "\n", - "#### ADDITIONALLY:\n", - "#### The 95M model series require the special_token argument to be set to True and model_input_size to be 4096. (current defaults)\n", - "#### The 30M model series require the special_token argument to be set to False and the model_input_size to be 2048." + "#### If one's data is in other formats besides .loom, one can use the relevant tools (such as Anndata tools) to convert the file to a .loom format prior to running the transcriptome tokenizer." ] }, { @@ -59,11 +43,8 @@ "metadata": {}, "outputs": [], "source": [ - "tk = TranscriptomeTokenizer({\"cell_type\": \"cell_type\", \"organ_major\": \"organ\"}, nproc=16)\n", - "tk.tokenize_data(\"loom_data_directory\", \n", - " \"output_directory\", \n", - " \"output_prefix\", \n", - " file_format=\"loom\")" + "tk = TranscriptomeTokenizer({\"cell_type\": \"cell_type\", \"organ_major\": \"organ_major\"}, nproc=4)\n", + "tk.tokenize_data(\"loom_data_directory\", \"output_directory\", \"output_prefix\")" ] } ], @@ -83,7 +64,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.15" + "version": "3.10.11" } }, "nbformat": 4, diff --git a/fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/config.json b/fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/config.json similarity index 100% rename from fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/config.json rename to 
fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/config.json diff --git a/fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/optimizer.pt b/fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/optimizer.pt similarity index 100% rename from fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/optimizer.pt rename to fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/optimizer.pt diff --git a/fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/pytorch_model.bin b/fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/pytorch_model.bin similarity index 100% rename from fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/pytorch_model.bin rename to fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/pytorch_model.bin diff --git a/fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/rng_state.pth b/fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/rng_state.pth similarity index 100% rename from fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/rng_state.pth rename to fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/rng_state.pth diff --git a/fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/scheduler.pt b/fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/scheduler.pt similarity index 100% rename from fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/scheduler.pt rename to fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/scheduler.pt diff --git a/fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/trainer_state.json b/fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/trainer_state.json similarity index 100% rename from 
fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/trainer_state.json rename to fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/trainer_state.json diff --git a/fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/training_args.bin b/fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/training_args.bin similarity index 100% rename from fine_tuned_models/gf-6L-30M-i2048_CellClassifier_cardiomyopathies_220224/training_args.bin rename to fine_tuned_models/geneformer-6L-30M_CellClassifier_cardiomyopathies_220224/training_args.bin diff --git a/fine_tuned_models/gf-12L-95M-i4096_MTLCellClassifier_CELLxGENE_240522/config.json b/fine_tuned_models/gf-12L-95M-i4096_MTLCellClassifier_CELLxGENE_240522/config.json deleted file mode 100755 index bc8099f84af0bd3e35d700a7135dd417e38f6bea..0000000000000000000000000000000000000000 --- a/fine_tuned_models/gf-12L-95M-i4096_MTLCellClassifier_CELLxGENE_240522/config.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "architectures": [ - "BertForMaskedLM" - ], - "attention_probs_dropout_prob": 0.02, - "classifier_dropout": null, - "hidden_act": "relu", - "hidden_dropout_prob": 0.02, - "hidden_size": 512, - "initializer_range": 0.02, - "intermediate_size": 1024, - "layer_norm_eps": 1e-12, - "max_position_embeddings": 4096, - "model_type": "bert", - "num_attention_heads": 8, - "num_hidden_layers": 12, - "pad_token_id": 0, - "position_embedding_type": "absolute", - "torch_dtype": "float32", - "transformers_version": "4.37.2", - "type_vocab_size": 2, - "use_cache": true, - "vocab_size": 20275 -} diff --git a/fine_tuned_models/gf-12L-95M-i4096_MTLCellClassifier_CELLxGENE_240522/pytorch_model.bin b/fine_tuned_models/gf-12L-95M-i4096_MTLCellClassifier_CELLxGENE_240522/pytorch_model.bin deleted file mode 100755 index 87625b1b8fe02c6aa0fc3ffd8c746275570e589d..0000000000000000000000000000000000000000 --- 
a/fine_tuned_models/gf-12L-95M-i4096_MTLCellClassifier_CELLxGENE_240522/pytorch_model.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:07b28d8c7bb789d59755c42d32f6182cc04d2cf34aafaa6397aa50e4fdf1a9b4 -size 152363342 diff --git a/gf-12L-30M-i2048/config.json b/geneformer-12L-30M/config.json similarity index 100% rename from gf-12L-30M-i2048/config.json rename to geneformer-12L-30M/config.json diff --git a/gf-12L-30M-i2048/pytorch_model.bin b/geneformer-12L-30M/pytorch_model.bin similarity index 100% rename from gf-12L-30M-i2048/pytorch_model.bin rename to geneformer-12L-30M/pytorch_model.bin diff --git a/gf-12L-30M-i2048/training_args.bin b/geneformer-12L-30M/training_args.bin similarity index 100% rename from gf-12L-30M-i2048/training_args.bin rename to geneformer-12L-30M/training_args.bin diff --git a/geneformer/__init__.py b/geneformer/__init__.py index 52d43619d06f2a7c019b480d1958a82d287d26ff..99c10b12ed2fe21f78dc996fc09a10d5571ddfd4 100644 --- a/geneformer/__init__.py +++ b/geneformer/__init__.py @@ -1,34 +1,12 @@ -# ruff: noqa: F401 -import warnings -from pathlib import Path - -warnings.filterwarnings("ignore", message=".*The 'nopython' keyword.*") # noqa # isort:skip - -GENE_MEDIAN_FILE = Path(__file__).parent / "gene_median_dictionary_gc95M.pkl" -TOKEN_DICTIONARY_FILE = Path(__file__).parent / "token_dictionary_gc95M.pkl" -ENSEMBL_DICTIONARY_FILE = Path(__file__).parent / "gene_name_id_dict_gc95M.pkl" -ENSEMBL_MAPPING_FILE = Path(__file__).parent / "ensembl_mapping_dict_gc95M.pkl" - -from . 
import ( - collator_for_classification, - emb_extractor, - in_silico_perturber, - in_silico_perturber_stats, - pretrainer, - tokenizer, -) -from .collator_for_classification import ( - DataCollatorForCellClassification, - DataCollatorForGeneClassification, -) -from .emb_extractor import EmbExtractor, get_embs -from .in_silico_perturber import InSilicoPerturber -from .in_silico_perturber_stats import InSilicoPerturberStats -from .pretrainer import GeneformerPretrainer +from . import tokenizer +from . import pretrainer +from . import collator_for_classification +from . import in_silico_perturber +from . import in_silico_perturber_stats from .tokenizer import TranscriptomeTokenizer - -from . import classifier # noqa # isort:skip -from .classifier import Classifier # noqa # isort:skip - -from . import mtl_classifier # noqa # isort:skip -from .mtl_classifier import MTLClassifier # noqa # isort:skip +from .pretrainer import GeneformerPretrainer +from .collator_for_classification import DataCollatorForGeneClassification +from .collator_for_classification import DataCollatorForCellClassification +from .emb_extractor import EmbExtractor +from .in_silico_perturber import InSilicoPerturber +from .in_silico_perturber_stats import InSilicoPerturberStats \ No newline at end of file diff --git a/geneformer/classifier.py b/geneformer/classifier.py deleted file mode 100644 index b5ac161e461a014cce6df0d75262a1bc98e88259..0000000000000000000000000000000000000000 --- a/geneformer/classifier.py +++ /dev/null @@ -1,1563 +0,0 @@ -""" -Geneformer classifier. 
- -**Input data:** - -| Cell state classifier: -| Single-cell transcriptomes as Geneformer rank value encodings with cell state labels in Geneformer .dataset format (generated from single-cell RNAseq data by tokenizer.py) - -| Gene classifier: -| Dictionary in format {Gene_label: list(genes)} for gene labels and single-cell transcriptomes as Geneformer rank value encodings in Geneformer .dataset format (generated from single-cell RNAseq data by tokenizer.py) - -**Usage:** - -.. code-block :: python - - >>> from geneformer import Classifier - >>> cc = Classifier(classifier="cell", # example of cell state classifier - ... cell_state_dict={"state_key": "disease", "states": "all"}, - ... filter_data={"cell_type":["Cardiomyocyte1","Cardiomyocyte2","Cardiomyocyte3"]}, - ... training_args=training_args, - ... freeze_layers = 2, - ... num_crossval_splits = 1, - ... forward_batch_size=200, - ... nproc=16) - >>> cc.prepare_data(input_data_file="path/to/input_data", - ... output_directory="path/to/output_directory", - ... output_prefix="output_prefix") - >>> all_metrics = cc.validate(model_directory="path/to/model", - ... prepared_input_data_file=f"path/to/output_directory/{output_prefix}_labeled.dataset", - ... id_class_dict_file=f"path/to/output_directory/{output_prefix}_id_class_dict.pkl", - ... output_directory="path/to/output_directory", - ... output_prefix="output_prefix", - ... predict_eval=True) - >>> cc.plot_conf_mat(conf_mat_dict={"Geneformer": all_metrics["conf_matrix"]}, - ... output_directory="path/to/output_directory", - ... output_prefix="output_prefix", - ... custom_class_order=["healthy","disease1","disease2"]) - >>> cc.plot_predictions(predictions_file=f"path/to/output_directory/datestamp_geneformer_cellClassifier_{output_prefix}/ksplit1/predictions.pkl", - ... id_class_dict_file=f"path/to/output_directory/{output_prefix}_id_class_dict.pkl", - ... title="disease", - ... output_directory="path/to/output_directory", - ... output_prefix="output_prefix", - ... 
custom_class_order=["healthy","disease1","disease2"]) -""" - -import datetime -import logging -import os -import pickle -import subprocess -from pathlib import Path - -import numpy as np -import pandas as pd -import seaborn as sns -from tqdm.auto import tqdm, trange -from transformers import Trainer -from transformers.training_args import TrainingArguments - -from . import ( - TOKEN_DICTIONARY_FILE, - DataCollatorForCellClassification, - DataCollatorForGeneClassification, -) -from . import classifier_utils as cu -from . import evaluation_utils as eu -from . import perturber_utils as pu - -sns.set() - - -logger = logging.getLogger(__name__) - - -class Classifier: - valid_option_dict = { - "classifier": {"cell", "gene"}, - "quantize": {bool, dict}, - "cell_state_dict": {None, dict}, - "gene_class_dict": {None, dict}, - "filter_data": {None, dict}, - "rare_threshold": {int, float}, - "max_ncells": {None, int}, - "max_ncells_per_class": {None, int}, - "training_args": {None, dict}, - "freeze_layers": {int}, - "num_crossval_splits": {0, 1, 5}, - "split_sizes": {None, dict}, - "no_eval": {bool}, - "stratify_splits_col": {None, str}, - "forward_batch_size": {int}, - "token_dictionary_file": {None, str}, - "nproc": {int}, - "ngpu": {int}, - } - - def __init__( - self, - classifier=None, - quantize=False, - cell_state_dict=None, - gene_class_dict=None, - filter_data=None, - rare_threshold=0, - max_ncells=None, - max_ncells_per_class=None, - training_args=None, - ray_config=None, - freeze_layers=0, - num_crossval_splits=1, - split_sizes={"train": 0.8, "valid": 0.1, "test": 0.1}, - stratify_splits_col=None, - no_eval=False, - forward_batch_size=100, - token_dictionary_file=None, - nproc=4, - ngpu=1, - ): - """ - Initialize Geneformer classifier. - - **Parameters:** - - classifier : {"cell", "gene"} - | Whether to fine-tune a cell state or gene classifier. - quantize : bool, dict - | Whether to fine-tune a quantized model. - | If True and no config provided, will use default. 
- | Will use custom config if provided. - | Configs should be provided as dictionary of BitsAndBytesConfig (transformers) and LoraConfig (peft). - | For example: {"bnb_config": BitsAndBytesConfig(...), - | "peft_config": LoraConfig(...)} - cell_state_dict : None, dict - | Cell states to fine-tune model to distinguish. - | Two-item dictionary with keys: state_key and states - | state_key: key specifying name of column in .dataset that defines the states to model - | states: list of values in the state_key column that specifies the states to model - | Alternatively, instead of a list of states, can specify "all" to use all states in that state key from input data. - | Of note, if using "all", states will be defined after data is filtered. - | Must have at least 2 states to model. - | For example: {"state_key": "disease", - | "states": ["nf", "hcm", "dcm"]} - | or - | {"state_key": "disease", - | "states": "all"} - gene_class_dict : None, dict - | Gene classes to fine-tune model to distinguish. - | Dictionary in format: {Gene_label_A: list(geneA1, geneA2, ...), - | Gene_label_B: list(geneB1, geneB2, ...)} - | Gene values should be Ensembl IDs. - filter_data : None, dict - | Default is to fine-tune with all input data. - | Otherwise, dictionary specifying .dataset column name and list of values to filter by. - rare_threshold : float - | Threshold below which rare cell states should be removed. - | For example, setting to 0.05 will remove cell states representing - | < 5% of the total cells from the cell state classifier's possible classes. - max_ncells : None, int - | Maximum number of cells to use for fine-tuning. - | Default is to fine-tune with all input data. - max_ncells_per_class : None, int - | Maximum number of cells per cell class to use for fine-tuning. - | Of note, will be applied after max_ncells above. - | (Only valid for cell classification.) - training_args : None, dict - | Training arguments for fine-tuning. 
- | If None, defaults will be inferred for 6 layer Geneformer. - | Otherwise, will use the Hugging Face defaults: - | https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments - | Note: Hyperparameter tuning is highly recommended, rather than using defaults. - ray_config : None, dict - | Training argument ranges for tuning hyperparameters with Ray. - freeze_layers : int - | Number of layers to freeze from fine-tuning. - | 0: no layers will be frozen; 2: first two layers will be frozen; etc. - num_crossval_splits : {0, 1, 5} - | 0: train on all data without splitting - | 1: split data into train and eval sets by designated split_sizes["valid"] - | 5: split data into 5 folds of train and eval sets by designated split_sizes["valid"] - split_sizes : None, dict - | Dictionary of proportion of data to hold out for train, validation, and test sets - | {"train": 0.8, "valid": 0.1, "test": 0.1} if intending 80/10/10 train/valid/test split - stratify_splits_col : None, str - | Name of column in .dataset to be used for stratified splitting. - | Proportion of each class in this column will be the same in the splits as in the original dataset. - no_eval : bool - | If True, will skip eval step and use all data for training. - | Otherwise, will perform eval during training. - forward_batch_size : int - | Batch size for forward pass (for evaluation, not training). - token_dictionary_file : None, str - | Default is to use token dictionary file from Geneformer - | Otherwise, will load custom gene token dictionary. - nproc : int - | Number of CPU processes to use. - ngpu : int - | Number of GPUs available. 
- - """ - - self.classifier = classifier - if self.classifier == "cell": - self.model_type = "CellClassifier" - elif self.classifier == "gene": - self.model_type = "GeneClassifier" - self.quantize = quantize - self.cell_state_dict = cell_state_dict - self.gene_class_dict = gene_class_dict - self.filter_data = filter_data - self.rare_threshold = rare_threshold - self.max_ncells = max_ncells - self.max_ncells_per_class = max_ncells_per_class - self.training_args = training_args - self.ray_config = ray_config - self.freeze_layers = freeze_layers - self.num_crossval_splits = num_crossval_splits - self.split_sizes = split_sizes - self.train_size = self.split_sizes["train"] - self.valid_size = self.split_sizes["valid"] - self.oos_test_size = self.split_sizes["test"] - self.eval_size = self.valid_size / (self.train_size + self.valid_size) - self.stratify_splits_col = stratify_splits_col - self.no_eval = no_eval - self.forward_batch_size = forward_batch_size - self.token_dictionary_file = token_dictionary_file - self.nproc = nproc - self.ngpu = ngpu - - if self.training_args is None: - logger.warning( - "Hyperparameter tuning is highly recommended for optimal results. " - "No training_args provided; using default hyperparameters." 
- ) - - self.validate_options() - - if self.filter_data is None: - self.filter_data = dict() - - if self.classifier == "cell": - if self.cell_state_dict["states"] != "all": - self.filter_data[ - self.cell_state_dict["state_key"] - ] = self.cell_state_dict["states"] - - # load token dictionary (Ensembl IDs:token) - if self.token_dictionary_file is None: - self.token_dictionary_file = TOKEN_DICTIONARY_FILE - with open(self.token_dictionary_file, "rb") as f: - self.gene_token_dict = pickle.load(f) - - self.token_gene_dict = {v: k for k, v in self.gene_token_dict.items()} - - # filter genes for gene classification for those in token dictionary - if self.classifier == "gene": - all_gene_class_values = set(pu.flatten_list(self.gene_class_dict.values())) - missing_genes = [ - gene - for gene in all_gene_class_values - if gene not in self.gene_token_dict.keys() - ] - if len(missing_genes) == len(all_gene_class_values): - logger.error( - "None of the provided genes to classify are in token dictionary." - ) - raise - elif len(missing_genes) > 0: - logger.warning( - f"Genes to classify {missing_genes} are not in token dictionary." - ) - self.gene_class_dict = { - k: list(set([self.gene_token_dict.get(gene) for gene in v])) - for k, v in self.gene_class_dict.items() - } - empty_classes = [] - for k, v in self.gene_class_dict.items(): - if len(v) == 0: - empty_classes += [k] - if len(empty_classes) > 0: - logger.error( - f"Class(es) {empty_classes} did not contain any genes in the token dictionary." 
- ) - raise - - def validate_options(self): - # confirm arguments are within valid options and compatible with each other - for attr_name, valid_options in self.valid_option_dict.items(): - attr_value = self.__dict__[attr_name] - if not isinstance(attr_value, (list, dict)): - if attr_value in valid_options: - continue - valid_type = False - for option in valid_options: - if (option in [int, float, list, dict, bool, str]) and isinstance( - attr_value, option - ): - valid_type = True - break - if valid_type: - continue - logger.error( - f"Invalid option for {attr_name}. " - f"Valid options for {attr_name}: {valid_options}" - ) - raise - - if self.filter_data is not None: - for key, value in self.filter_data.items(): - if not isinstance(value, list): - self.filter_data[key] = [value] - logger.warning( - "Values in filter_data dict must be lists. " - f"Changing {key} value to list ([{value}])." - ) - - if self.classifier == "cell": - if set(self.cell_state_dict.keys()) != set(["state_key", "states"]): - logger.error( - "Invalid keys for cell_state_dict. " - "The cell_state_dict should have only 2 keys: state_key and states" - ) - raise - - if self.cell_state_dict["states"] != "all": - if not isinstance(self.cell_state_dict["states"], list): - logger.error( - "States in cell_state_dict should be list of states to model." - ) - raise - if len(self.cell_state_dict["states"]) < 2: - logger.error( - "States in cell_state_dict should contain at least 2 states to classify." - ) - raise - - if self.classifier == "gene": - if len(self.gene_class_dict.keys()) < 2: - logger.error( - "Gene_class_dict should contain at least 2 gene classes to classify." 
- ) - raise - if sum(self.split_sizes.values()) != 1: - logger.error("Train, validation, and test proportions should sum to 1.") - raise - - def prepare_data( - self, - input_data_file, - output_directory, - output_prefix, - split_id_dict=None, - test_size=None, - attr_to_split=None, - attr_to_balance=None, - max_trials=100, - pval_threshold=0.1, - ): - """ - Prepare data for cell state or gene classification. - - **Parameters** - - input_data_file : Path - | Path to directory containing .dataset input - output_directory : Path - | Path to directory where prepared data will be saved - output_prefix : str - | Prefix for output file - split_id_dict : None, dict - | Dictionary of IDs for train and test splits - | Three-item dictionary with keys: attr_key, train, test - | attr_key: key specifying name of column in .dataset that contains the IDs for the data splits - | train: list of IDs in the attr_key column to include in the train split - | test: list of IDs in the attr_key column to include in the test split - | For example: {"attr_key": "individual", - | "train": ["patient1", "patient2", "patient3", "patient4"], - | "test": ["patient5", "patient6"]} - test_size : None, float - | Proportion of data to be saved separately and held out for test set - | (e.g. 0.2 if intending hold out 20%) - | If None, will inherit from split_sizes["test"] from Classifier - | The training set will be further split to train / validation in self.validate - | Note: only available for CellClassifiers - attr_to_split : None, str - | Key for attribute on which to split data while balancing potential confounders - | e.g. "patient_id" for splitting by patient while balancing other characteristics - | Note: only available for CellClassifiers - attr_to_balance : None, list - | List of attribute keys on which to balance data while splitting on attr_to_split - | e.g. 
["age", "sex"] for balancing these characteristics while splitting by patient - | Note: only available for CellClassifiers - max_trials : None, int - | Maximum number of trials of random splitting to try to achieve balanced other attributes - | If no split is found without significant (p<0.05) differences in other attributes, will select best - | Note: only available for CellClassifiers - pval_threshold : None, float - | P-value threshold to use for attribute balancing across splits - | E.g. if set to 0.1, will accept trial if p >= 0.1 for all attributes in attr_to_balance - """ - - if test_size is None: - test_size = self.oos_test_size - - # prepare data and labels for classification - data = pu.load_and_filter(self.filter_data, self.nproc, input_data_file) - - if self.classifier == "cell": - if "label" in data.features: - logger.error( - "Column name 'label' must be reserved for class IDs. Please rename column." - ) - raise - elif self.classifier == "gene": - if "labels" in data.features: - logger.error( - "Column name 'labels' must be reserved for class IDs. Please rename column." - ) - raise - - if (attr_to_split is not None) and (attr_to_balance is None): - logger.error( - "Splitting by attribute while balancing confounders requires both attr_to_split and attr_to_balance to be defined." 
- ) - raise - - if not isinstance(attr_to_balance, list): - attr_to_balance = [attr_to_balance] - - if self.classifier == "cell": - # remove cell states representing < rare_threshold of cells - data = cu.remove_rare( - data, self.rare_threshold, self.cell_state_dict["state_key"], self.nproc - ) - # downsample max cells and max per class - data = cu.downsample_and_shuffle( - data, self.max_ncells, self.max_ncells_per_class, self.cell_state_dict - ) - # rename cell state column to "label" - data = cu.rename_cols(data, self.cell_state_dict["state_key"]) - - # convert classes to numerical labels and save as id_class_dict - # of note, will label all genes in gene_class_dict - # if (cross-)validating, genes will be relabeled in column "labels" for each split - # at the time of training with Classifier.validate - data, id_class_dict = cu.label_classes( - self.classifier, data, self.gene_class_dict, self.nproc - ) - - # save id_class_dict for future reference - id_class_output_path = ( - Path(output_directory) / f"{output_prefix}_id_class_dict" - ).with_suffix(".pkl") - with open(id_class_output_path, "wb") as f: - pickle.dump(id_class_dict, f) - - if split_id_dict is not None: - data_dict = dict() - data_dict["train"] = pu.filter_by_dict( - data, {split_id_dict["attr_key"]: split_id_dict["train"]}, self.nproc - ) - data_dict["test"] = pu.filter_by_dict( - data, {split_id_dict["attr_key"]: split_id_dict["test"]}, self.nproc - ) - train_data_output_path = ( - Path(output_directory) / f"{output_prefix}_labeled_train" - ).with_suffix(".dataset") - test_data_output_path = ( - Path(output_directory) / f"{output_prefix}_labeled_test" - ).with_suffix(".dataset") - data_dict["train"].save_to_disk(str(train_data_output_path)) - data_dict["test"].save_to_disk(str(test_data_output_path)) - elif (test_size is not None) and (self.classifier == "cell"): - if 1 > test_size > 0: - if attr_to_split is None: - data_dict = data.train_test_split( - test_size=test_size, - 
stratify_by_column=self.stratify_splits_col, - seed=42, - ) - train_data_output_path = ( - Path(output_directory) / f"{output_prefix}_labeled_train" - ).with_suffix(".dataset") - test_data_output_path = ( - Path(output_directory) / f"{output_prefix}_labeled_test" - ).with_suffix(".dataset") - data_dict["train"].save_to_disk(str(train_data_output_path)) - data_dict["test"].save_to_disk(str(test_data_output_path)) - else: - data_dict, balance_df = cu.balance_attr_splits( - data, - attr_to_split, - attr_to_balance, - test_size, - max_trials, - pval_threshold, - self.cell_state_dict["state_key"], - self.nproc, - ) - balance_df.to_csv( - f"{output_directory}/{output_prefix}_train_test_balance_df.csv" - ) - train_data_output_path = ( - Path(output_directory) / f"{output_prefix}_labeled_train" - ).with_suffix(".dataset") - test_data_output_path = ( - Path(output_directory) / f"{output_prefix}_labeled_test" - ).with_suffix(".dataset") - data_dict["train"].save_to_disk(str(train_data_output_path)) - data_dict["test"].save_to_disk(str(test_data_output_path)) - else: - data_output_path = ( - Path(output_directory) / f"{output_prefix}_labeled" - ).with_suffix(".dataset") - data.save_to_disk(str(data_output_path)) - print(data_output_path) - else: - data_output_path = ( - Path(output_directory) / f"{output_prefix}_labeled" - ).with_suffix(".dataset") - data.save_to_disk(str(data_output_path)) - - def train_all_data( - self, - model_directory, - prepared_input_data_file, - id_class_dict_file, - output_directory, - output_prefix, - save_eval_output=True, - gene_balance=False, - ): - """ - Train cell state or gene classifier using all data. 
- - **Parameters** - - model_directory : Path - | Path to directory containing model - prepared_input_data_file : Path - | Path to directory containing _labeled.dataset previously prepared by Classifier.prepare_data - id_class_dict_file : Path - | Path to _id_class_dict.pkl previously prepared by Classifier.prepare_data - | (dictionary of format: numerical IDs: class_labels) - output_directory : Path - | Path to directory where model and eval data will be saved - output_prefix : str - | Prefix for output files - save_eval_output : bool - | Whether to save cross-fold eval output - | Saves as pickle file of dictionary of eval metrics - gene_balance : None, bool - | Whether to automatically balance genes in training set. - | Only available for binary gene classifications. - - **Output** - - Returns trainer after fine-tuning with all data. - - """ - - if (gene_balance is True) and (len(self.gene_class_dict.values()) != 2): - logger.error( - "Automatically balancing gene sets for training is only available for binary gene classifications." 
- ) - raise - - ##### Load data and prepare output directory ##### - # load numerical id to class dictionary (id:class) - with open(id_class_dict_file, "rb") as f: - id_class_dict = pickle.load(f) - class_id_dict = {v: k for k, v in id_class_dict.items()} - - # load previously filtered and prepared data - data = pu.load_and_filter(None, self.nproc, prepared_input_data_file) - data = data.shuffle(seed=42) # reshuffle in case users provide unshuffled data - - # define output directory path - current_date = datetime.datetime.now() - datestamp = f"{str(current_date.year)[-2:]}{current_date.month:02d}{current_date.day:02d}" - if output_directory[-1:] != "/": # add slash for dir if not present - output_directory = output_directory + "/" - output_dir = f"{output_directory}{datestamp}_geneformer_{self.classifier}Classifier_{output_prefix}/" - subprocess.call(f"mkdir {output_dir}", shell=True) - - # get number of classes for classifier - num_classes = cu.get_num_classes(id_class_dict) - - if self.classifier == "gene": - targets = pu.flatten_list(self.gene_class_dict.values()) - labels = pu.flatten_list( - [ - [class_id_dict[label]] * len(targets) - for label, targets in self.gene_class_dict.items() - ] - ) - assert len(targets) == len(labels) - data = cu.prep_gene_classifier_all_data( - data, targets, labels, self.max_ncells, self.nproc, gene_balance - ) - - trainer = self.train_classifier( - model_directory, num_classes, data, None, output_dir - ) - - return trainer - - def validate( - self, - model_directory, - prepared_input_data_file, - id_class_dict_file, - output_directory, - output_prefix, - split_id_dict=None, - attr_to_split=None, - attr_to_balance=None, - gene_balance=False, - max_trials=100, - pval_threshold=0.1, - save_eval_output=True, - predict_eval=True, - predict_trainer=False, - n_hyperopt_trials=0, - save_gene_split_datasets=True, - debug_gene_split_datasets=False, - ): - """ - (Cross-)validate cell state or gene classifier. 
- - **Parameters** - - model_directory : Path - | Path to directory containing model - prepared_input_data_file : Path - | Path to directory containing _labeled.dataset previously prepared by Classifier.prepare_data - id_class_dict_file : Path - | Path to _id_class_dict.pkl previously prepared by Classifier.prepare_data - | (dictionary of format: numerical IDs: class_labels) - output_directory : Path - | Path to directory where model and eval data will be saved - output_prefix : str - | Prefix for output files - split_id_dict : None, dict - | Dictionary of IDs for train and eval splits - | Three-item dictionary with keys: attr_key, train, eval - | attr_key: key specifying name of column in .dataset that contains the IDs for the data splits - | train: list of IDs in the attr_key column to include in the train split - | eval: list of IDs in the attr_key column to include in the eval split - | For example: {"attr_key": "individual", - | "train": ["patient1", "patient2", "patient3", "patient4"], - | "eval": ["patient5", "patient6"]} - | Note: only available for CellClassifiers with 1-fold split (self.classifier="cell"; self.num_crossval_splits=1) - attr_to_split : None, str - | Key for attribute on which to split data while balancing potential confounders - | e.g. "patient_id" for splitting by patient while balancing other characteristics - | Note: only available for CellClassifiers with 1-fold split (self.classifier="cell"; self.num_crossval_splits=1) - attr_to_balance : None, list - | List of attribute keys on which to balance data while splitting on attr_to_split - | e.g. ["age", "sex"] for balancing these characteristics while splitting by patient - gene_balance : None, bool - | Whether to automatically balance genes in training set. - | Only available for binary gene classifications. 
- max_trials : None, int - | Maximum number of trials of random splitting to try to achieve balanced other attribute - | If no split is found without significant (p < pval_threshold) differences in other attributes, will select best - pval_threshold : None, float - | P-value threshold to use for attribute balancing across splits - | E.g. if set to 0.1, will accept trial if p >= 0.1 for all attributes in attr_to_balance - save_eval_output : bool - | Whether to save cross-fold eval output - | Saves as pickle file of dictionary of eval metrics - predict_eval : bool - | Whether or not to save eval predictions - | Saves as a pickle file of self.evaluate predictions - predict_trainer : bool - | Whether or not to save eval predictions from trainer - | Saves as a pickle file of trainer predictions - n_hyperopt_trials : int - | Number of trials to run for hyperparameter optimization - | If 0, will not optimize hyperparameters - save_gene_split_datasets : bool - | Whether or not to save train, valid, and test gene-labeled datasets - """ - if self.num_crossval_splits == 0: - logger.error("num_crossval_splits must be 1 or 5 to validate.") - raise - - if (gene_balance is True) and (len(self.gene_class_dict.values()) != 2): - logger.error( - "Automatically balancing gene sets for training is only available for binary gene classifications." - ) - raise - - # ensure number of genes in each class is > 5 if validating model - if self.classifier == "gene": - insuff_classes = [k for k, v in self.gene_class_dict.items() if len(v) < 5] - if (self.num_crossval_splits > 0) and (len(insuff_classes) > 0): - logger.error( - f"Insufficient # of members in class(es) {insuff_classes} to (cross-)validate." 
- ) - raise - - ##### Load data and prepare output directory ##### - # load numerical id to class dictionary (id:class) - with open(id_class_dict_file, "rb") as f: - id_class_dict = pickle.load(f) - class_id_dict = {v: k for k, v in id_class_dict.items()} - - # load previously filtered and prepared data - data = pu.load_and_filter(None, self.nproc, prepared_input_data_file) - data = data.shuffle(seed=42) # reshuffle in case users provide unshuffled data - - # define output directory path - current_date = datetime.datetime.now() - datestamp = f"{str(current_date.year)[-2:]}{current_date.month:02d}{current_date.day:02d}" - if output_directory[-1:] != "/": # add slash for dir if not present - output_directory = output_directory + "/" - output_dir = f"{output_directory}{datestamp}_geneformer_{self.classifier}Classifier_{output_prefix}/" - subprocess.call(f"mkdir {output_dir}", shell=True) - - # get number of classes for classifier - num_classes = cu.get_num_classes(id_class_dict) - - ##### (Cross-)validate the model ##### - results = [] - all_conf_mat = np.zeros((num_classes, num_classes)) - iteration_num = 1 - if self.classifier == "cell": - for i in trange(self.num_crossval_splits): - print( - f"****** Validation split: {iteration_num}/{self.num_crossval_splits} ******\n" - ) - ksplit_output_dir = os.path.join(output_dir, f"ksplit{iteration_num}") - if self.num_crossval_splits == 1: - # single 1-eval_size:eval_size split - if split_id_dict is not None: - data_dict = dict() - data_dict["train"] = pu.filter_by_dict( - data, - {split_id_dict["attr_key"]: split_id_dict["train"]}, - self.nproc, - ) - data_dict["test"] = pu.filter_by_dict( - data, - {split_id_dict["attr_key"]: split_id_dict["eval"]}, - self.nproc, - ) - elif attr_to_split is not None: - data_dict, balance_df = cu.balance_attr_splits( - data, - attr_to_split, - attr_to_balance, - self.eval_size, - max_trials, - pval_threshold, - self.cell_state_dict["state_key"], - self.nproc, - ) - - balance_df.to_csv( - 
f"{output_dir}/{output_prefix}_train_valid_balance_df.csv" - ) - else: - data_dict = data.train_test_split( - test_size=self.eval_size, - stratify_by_column=self.stratify_splits_col, - seed=42, - ) - train_data = data_dict["train"] - eval_data = data_dict["test"] - else: - # 5-fold cross-validate - num_cells = len(data) - fifth_cells = int(np.floor(num_cells * 0.2)) - num_eval = min((self.eval_size * num_cells), fifth_cells) - start = i * fifth_cells - end = start + num_eval - eval_indices = [j for j in range(start, end)] - train_indices = [ - j for j in range(num_cells) if j not in eval_indices - ] - eval_data = data.select(eval_indices) - train_data = data.select(train_indices) - if n_hyperopt_trials == 0: - trainer = self.train_classifier( - model_directory, - num_classes, - train_data, - eval_data, - ksplit_output_dir, - predict_trainer, - ) - else: - trainer = self.hyperopt_classifier( - model_directory, - num_classes, - train_data, - eval_data, - ksplit_output_dir, - n_trials=n_hyperopt_trials, - ) - if iteration_num == self.num_crossval_splits: - return - else: - iteration_num = iteration_num + 1 - continue - - result = self.evaluate_model( - trainer.model, - num_classes, - id_class_dict, - eval_data, - predict_eval, - ksplit_output_dir, - output_prefix, - ) - results += [result] - all_conf_mat = all_conf_mat + result["conf_mat"] - iteration_num = iteration_num + 1 - - elif self.classifier == "gene": - # set up (cross-)validation splits - targets = pu.flatten_list(self.gene_class_dict.values()) - labels = pu.flatten_list( - [ - [class_id_dict[label]] * len(targets) - for label, targets in self.gene_class_dict.items() - ] - ) - assert len(targets) == len(labels) - n_splits = int(1 / (1 - self.train_size)) - skf = cu.StratifiedKFold3(n_splits=n_splits, random_state=0, shuffle=True) - # (Cross-)validate - test_ratio = self.oos_test_size / (self.eval_size + self.oos_test_size) - for train_index, eval_index, test_index in tqdm( - skf.split(targets, labels, 
test_ratio) - ): - print( - f"****** Validation split: {iteration_num}/{self.num_crossval_splits} ******\n" - ) - ksplit_output_dir = os.path.join(output_dir, f"ksplit{iteration_num}") - # filter data for examples containing classes for this split - # subsample to max_ncells and relabel data in column "labels" - train_data, eval_data = cu.prep_gene_classifier_train_eval_split( - data, - targets, - labels, - train_index, - eval_index, - self.max_ncells, - iteration_num, - self.nproc, - gene_balance, - ) - - if save_gene_split_datasets is True: - for split_name in ["train", "valid"]: - labeled_dataset_output_path = ( - Path(output_dir) - / f"{output_prefix}_{split_name}_gene_labeled_ksplit{iteration_num}" - ).with_suffix(".dataset") - if split_name == "train": - train_data.save_to_disk(str(labeled_dataset_output_path)) - elif split_name == "valid": - eval_data.save_to_disk(str(labeled_dataset_output_path)) - - if self.oos_test_size > 0: - test_data = cu.prep_gene_classifier_split( - data, - targets, - labels, - test_index, - "test", - self.max_ncells, - iteration_num, - self.nproc, - ) - if save_gene_split_datasets is True: - test_labeled_dataset_output_path = ( - Path(output_dir) - / f"{output_prefix}_test_gene_labeled_ksplit{iteration_num}" - ).with_suffix(".dataset") - test_data.save_to_disk(str(test_labeled_dataset_output_path)) - if debug_gene_split_datasets is True: - logger.error( - "Exiting after saving gene split datasets given debug_gene_split_datasets = True." 
- ) - raise - if n_hyperopt_trials == 0: - trainer = self.train_classifier( - model_directory, - num_classes, - train_data, - eval_data, - ksplit_output_dir, - predict_trainer, - ) - result = self.evaluate_model( - trainer.model, - num_classes, - id_class_dict, - eval_data, - predict_eval, - ksplit_output_dir, - output_prefix, - ) - else: - trainer = self.hyperopt_classifier( - model_directory, - num_classes, - train_data, - eval_data, - ksplit_output_dir, - n_trials=n_hyperopt_trials, - ) - - model = cu.load_best_model( - ksplit_output_dir, self.model_type, num_classes - ) - - if self.oos_test_size > 0: - result = self.evaluate_model( - model, - num_classes, - id_class_dict, - test_data, - predict_eval, - ksplit_output_dir, - output_prefix, - ) - else: - if iteration_num == self.num_crossval_splits: - return - else: - iteration_num = iteration_num + 1 - continue - results += [result] - all_conf_mat = all_conf_mat + result["conf_mat"] - # break after 1 or 5 splits, each with train/eval proportions dictated by eval_size - if iteration_num == self.num_crossval_splits: - break - iteration_num = iteration_num + 1 - - all_conf_mat_df = pd.DataFrame( - all_conf_mat, columns=id_class_dict.values(), index=id_class_dict.values() - ) - all_metrics = { - "conf_matrix": all_conf_mat_df, - "macro_f1": [result["macro_f1"] for result in results], - "acc": [result["acc"] for result in results], - } - all_roc_metrics = None # roc metrics not reported for multiclass - if num_classes == 2: - mean_fpr = np.linspace(0, 1, 100) - all_tpr = [result["roc_metrics"]["interp_tpr"] for result in results] - all_roc_auc = [result["roc_metrics"]["auc"] for result in results] - all_tpr_wt = [result["roc_metrics"]["tpr_wt"] for result in results] - mean_tpr, roc_auc, roc_auc_sd = eu.get_cross_valid_roc_metrics( - all_tpr, all_roc_auc, all_tpr_wt - ) - all_roc_metrics = { - "mean_tpr": mean_tpr, - "mean_fpr": mean_fpr, - "all_roc_auc": all_roc_auc, - "roc_auc": roc_auc, - "roc_auc_sd": roc_auc_sd, 
- } - all_metrics["all_roc_metrics"] = all_roc_metrics - if save_eval_output is True: - eval_metrics_output_path = ( - Path(output_dir) / f"{output_prefix}_eval_metrics_dict" - ).with_suffix(".pkl") - with open(eval_metrics_output_path, "wb") as f: - pickle.dump(all_metrics, f) - - return all_metrics - - def hyperopt_classifier( - self, - model_directory, - num_classes, - train_data, - eval_data, - output_directory, - n_trials=100, - ): - """ - Fine-tune model for cell state or gene classification. - - **Parameters** - - model_directory : Path - | Path to directory containing model - num_classes : int - | Number of classes for classifier - train_data : Dataset - | Loaded training .dataset input - | For cell classifier, labels in column "label". - | For gene classifier, labels in column "labels". - eval_data : None, Dataset - | (Optional) Loaded evaluation .dataset input - | For cell classifier, labels in column "label". - | For gene classifier, labels in column "labels". - output_directory : Path - | Path to directory where fine-tuned model will be saved - n_trials : int - | Number of trials to run for hyperparameter optimization - """ - - # initiate runtime environment for raytune - import ray - from ray import tune - from ray.tune.search.hyperopt import HyperOptSearch - - ray.shutdown() # engage new ray session - ray.init() - - ##### Validate and prepare data ##### - train_data, eval_data = cu.validate_and_clean_cols( - train_data, eval_data, self.classifier - ) - - if (self.no_eval is True) and (eval_data is not None): - logger.warning( - "no_eval set to True; hyperparameter optimization requires eval, proceeding with eval" - ) - - # ensure not overwriting previously saved model - saved_model_test = os.path.join(output_directory, "pytorch_model.bin") - if os.path.isfile(saved_model_test) is True: - logger.error("Model already saved to this designated output directory.") - raise - # make output directory - subprocess.call(f"mkdir {output_directory}", shell=True) 
- - ##### Load model and training args ##### - model = pu.load_model( - self.model_type, - num_classes, - model_directory, - "train", - quantize=self.quantize, - ) - def_training_args, def_freeze_layers = cu.get_default_train_args( - model, self.classifier, train_data, output_directory - ) - del model - - if self.training_args is not None: - def_training_args.update(self.training_args) - logging_steps = round( - len(train_data) / def_training_args["per_device_train_batch_size"] / 10 - ) - def_training_args["logging_steps"] = logging_steps - def_training_args["output_dir"] = output_directory - if eval_data is None: - def_training_args["evaluation_strategy"] = "no" - def_training_args["load_best_model_at_end"] = False - def_training_args.update( - {"save_strategy": "epoch", "save_total_limit": 1} - ) # only save last model for each run - training_args_init = TrainingArguments(**def_training_args) - - ##### Fine-tune the model ##### - # define the data collator - if self.classifier == "cell": - data_collator = DataCollatorForCellClassification( - token_dictionary=self.gene_token_dict - ) - elif self.classifier == "gene": - data_collator = DataCollatorForGeneClassification( - token_dictionary=self.gene_token_dict - ) - - # define function to initiate model - def model_init(): - model = pu.load_model( - self.model_type, - num_classes, - model_directory, - "train", - quantize=self.quantize, - ) - - if self.freeze_layers is not None: - def_freeze_layers = self.freeze_layers - - if def_freeze_layers > 0: - modules_to_freeze = model.bert.encoder.layer[:def_freeze_layers] - for module in modules_to_freeze: - for param in module.parameters(): - param.requires_grad = False - - if self.quantize is False: - model = model.to("cuda:0") - return model - - # create the trainer - trainer = Trainer( - model_init=model_init, - args=training_args_init, - data_collator=data_collator, - train_dataset=train_data, - eval_dataset=eval_data, - compute_metrics=cu.compute_metrics, - ) - - # 
specify raytune hyperparameter search space - if self.ray_config is None: - logger.warning( - "No ray_config provided. Proceeding with default, but ranges may need adjustment depending on model." - ) - def_ray_config = { - "num_train_epochs": tune.choice([1]), - "learning_rate": tune.loguniform(1e-6, 1e-3), - "weight_decay": tune.uniform(0.0, 0.3), - "lr_scheduler_type": tune.choice(["linear", "cosine", "polynomial"]), - "warmup_steps": tune.uniform(100, 2000), - "seed": tune.uniform(0, 100), - "per_device_train_batch_size": tune.choice( - [def_training_args["per_device_train_batch_size"]] - ), - } - - hyperopt_search = HyperOptSearch(metric="eval_macro_f1", mode="max") - - # optimize hyperparameters - trainer.hyperparameter_search( - direction="maximize", - backend="ray", - resources_per_trial={"cpu": int(self.nproc / self.ngpu), "gpu": 1}, - hp_space=lambda _: def_ray_config - if self.ray_config is None - else self.ray_config, - search_alg=hyperopt_search, - n_trials=n_trials, # number of trials - progress_reporter=tune.CLIReporter( - max_report_frequency=600, - sort_by_metric=True, - max_progress_rows=n_trials, - mode="max", - metric="eval_macro_f1", - metric_columns=["loss", "eval_loss", "eval_accuracy", "eval_macro_f1"], - ), - storage_path=output_directory, - ) - - return trainer - - def train_classifier( - self, - model_directory, - num_classes, - train_data, - eval_data, - output_directory, - predict=False, - ): - """ - Fine-tune model for cell state or gene classification. - - **Parameters** - - model_directory : Path - | Path to directory containing model - num_classes : int - | Number of classes for classifier - train_data : Dataset - | Loaded training .dataset input - | For cell classifier, labels in column "label". - | For gene classifier, labels in column "labels". - eval_data : None, Dataset - | (Optional) Loaded evaluation .dataset input - | For cell classifier, labels in column "label". - | For gene classifier, labels in column "labels". 
- output_directory : Path - | Path to directory where fine-tuned model will be saved - predict : bool - | Whether or not to save eval predictions from trainer - """ - - ##### Validate and prepare data ##### - train_data, eval_data = cu.validate_and_clean_cols( - train_data, eval_data, self.classifier - ) - - if (self.no_eval is True) and (eval_data is not None): - logger.warning( - "no_eval set to True; model will be trained without evaluation." - ) - eval_data = None - - if (self.classifier == "gene") and (predict is True): - logger.warning( - "Predictions during training not currently available for gene classifiers; setting predict to False." - ) - predict = False - - # ensure not overwriting previously saved model - saved_model_test = os.path.join(output_directory, "pytorch_model.bin") - if os.path.isfile(saved_model_test) is True: - logger.error("Model already saved to this designated output directory.") - raise - # make output directory - subprocess.call(f"mkdir {output_directory}", shell=True) - - ##### Load model and training args ##### - model = pu.load_model( - self.model_type, - num_classes, - model_directory, - "train", - quantize=self.quantize, - ) - - def_training_args, def_freeze_layers = cu.get_default_train_args( - model, self.classifier, train_data, output_directory - ) - - if self.training_args is not None: - def_training_args.update(self.training_args) - logging_steps = round( - len(train_data) / def_training_args["per_device_train_batch_size"] / 10 - ) - def_training_args["logging_steps"] = logging_steps - def_training_args["output_dir"] = output_directory - if eval_data is None: - def_training_args["evaluation_strategy"] = "no" - def_training_args["load_best_model_at_end"] = False - training_args_init = TrainingArguments(**def_training_args) - - if self.freeze_layers is not None: - def_freeze_layers = self.freeze_layers - - if def_freeze_layers > 0: - modules_to_freeze = model.bert.encoder.layer[:def_freeze_layers] - for module in 
modules_to_freeze: - for param in module.parameters(): - param.requires_grad = False - - ##### Fine-tune the model ##### - # define the data collator - if self.classifier == "cell": - data_collator = DataCollatorForCellClassification( - token_dictionary=self.gene_token_dict - ) - elif self.classifier == "gene": - data_collator = DataCollatorForGeneClassification( - token_dictionary=self.gene_token_dict - ) - - # create the trainer - trainer = Trainer( - model=model, - args=training_args_init, - data_collator=data_collator, - train_dataset=train_data, - eval_dataset=eval_data, - compute_metrics=cu.compute_metrics, - ) - - # train the classifier - trainer.train() - trainer.save_model(output_directory) - if predict is True: - # make eval predictions and save predictions and metrics - predictions = trainer.predict(eval_data) - prediction_output_path = f"{output_directory}/predictions.pkl" - with open(prediction_output_path, "wb") as f: - pickle.dump(predictions, f) - trainer.save_metrics("eval", predictions.metrics) - return trainer - - def evaluate_model( - self, - model, - num_classes, - id_class_dict, - eval_data, - predict=False, - output_directory=None, - output_prefix=None, - ): - """ - Evaluate the fine-tuned model. - - **Parameters** - - model : nn.Module - | Loaded fine-tuned model (e.g. 
trainer.model) - num_classes : int - | Number of classes for classifier - id_class_dict : dict - | Loaded _id_class_dict.pkl previously prepared by Classifier.prepare_data - | (dictionary of format: numerical IDs: class_labels) - eval_data : Dataset - | Loaded evaluation .dataset input - predict : bool - | Whether or not to save eval predictions - output_directory : Path - | Path to directory where eval data will be saved - output_prefix : str - | Prefix for output files - """ - - ##### Evaluate the model ##### - labels = id_class_dict.keys() - y_pred, y_true, logits_list = eu.classifier_predict( - model, self.classifier, eval_data, self.forward_batch_size - ) - conf_mat, macro_f1, acc, roc_metrics = eu.get_metrics( - y_pred, y_true, logits_list, num_classes, labels - ) - if predict is True: - pred_dict = { - "pred_ids": y_pred, - "label_ids": y_true, - "predictions": logits_list, - } - pred_dict_output_path = ( - Path(output_directory) / f"{output_prefix}_pred_dict" - ).with_suffix(".pkl") - with open(pred_dict_output_path, "wb") as f: - pickle.dump(pred_dict, f) - return { - "conf_mat": conf_mat, - "macro_f1": macro_f1, - "acc": acc, - "roc_metrics": roc_metrics, - } - - def evaluate_saved_model( - self, - model_directory, - id_class_dict_file, - test_data_file, - output_directory, - output_prefix, - predict=True, - ): - """ - Evaluate the fine-tuned model. 
- - **Parameters** - - model_directory : Path - | Path to directory containing model - id_class_dict_file : Path - | Path to _id_class_dict.pkl previously prepared by Classifier.prepare_data - | (dictionary of format: numerical IDs: class_labels) - test_data_file : Path - | Path to directory containing test .dataset - output_directory : Path - | Path to directory where eval data will be saved - output_prefix : str - | Prefix for output files - predict : bool - | Whether or not to save eval predictions - """ - - # load numerical id to class dictionary (id:class) - with open(id_class_dict_file, "rb") as f: - id_class_dict = pickle.load(f) - - # get number of classes for classifier - num_classes = cu.get_num_classes(id_class_dict) - - # load previously filtered and prepared data - test_data = pu.load_and_filter(None, self.nproc, test_data_file) - - # load previously fine-tuned model - model = pu.load_model( - self.model_type, - num_classes, - model_directory, - "eval", - quantize=self.quantize, - ) - - # evaluate the model - result = self.evaluate_model( - model, - num_classes, - id_class_dict, - test_data, - predict=predict, - output_directory=output_directory, - output_prefix=output_prefix, - ) - - all_conf_mat_df = pd.DataFrame( - result["conf_mat"], - columns=id_class_dict.values(), - index=id_class_dict.values(), - ) - all_metrics = { - "conf_matrix": all_conf_mat_df, - "macro_f1": result["macro_f1"], - "acc": result["acc"], - } - all_roc_metrics = None # roc metrics not reported for multiclass - - if num_classes == 2: - mean_fpr = np.linspace(0, 1, 100) - mean_tpr = result["roc_metrics"]["interp_tpr"] - all_roc_auc = result["roc_metrics"]["auc"] - all_roc_metrics = { - "mean_tpr": mean_tpr, - "mean_fpr": mean_fpr, - "all_roc_auc": all_roc_auc, - } - all_metrics["all_roc_metrics"] = all_roc_metrics - test_metrics_output_path = ( - Path(output_directory) / f"{output_prefix}_test_metrics_dict" - ).with_suffix(".pkl") - with open(test_metrics_output_path, "wb") as 
f: - pickle.dump(all_metrics, f) - - return all_metrics - - def plot_conf_mat( - self, - conf_mat_dict, - output_directory, - output_prefix, - custom_class_order=None, - ): - """ - Plot confusion matrix results of evaluating the fine-tuned model. - - **Parameters** - - conf_mat_dict : dict - | Dictionary of model_name : confusion_matrix_DataFrame - | (all_metrics["conf_matrix"] from self.validate) - output_directory : Path - | Path to directory where plots will be saved - output_prefix : str - | Prefix for output file - custom_class_order : None, list - | List of classes in custom order for plots. - | Same order will be used for all models. - """ - - for model_name in conf_mat_dict.keys(): - eu.plot_confusion_matrix( - conf_mat_dict[model_name], - model_name, - output_directory, - output_prefix, - custom_class_order, - ) - - def plot_roc( - self, - roc_metric_dict, - model_style_dict, - title, - output_directory, - output_prefix, - ): - """ - Plot ROC curve results of evaluating the fine-tuned model. - - **Parameters** - - roc_metric_dict : dict - | Dictionary of model_name : roc_metrics - | (all_metrics["all_roc_metrics"] from self.validate) - model_style_dict : dict[dict] - | Dictionary of model_name : dictionary of style_attribute : style - | where style includes color and linestyle - | e.g. {'Model_A': {'color': 'black', 'linestyle': '-'}, 'Model_B': ...} - title : str - | Title of plot (e.g. 'Dosage-sensitive vs -insensitive factors') - output_directory : Path - | Path to directory where plots will be saved - output_prefix : str - | Prefix for output file - """ - - eu.plot_ROC( - roc_metric_dict, model_style_dict, title, output_directory, output_prefix - ) - - def plot_predictions( - self, - predictions_file, - id_class_dict_file, - title, - output_directory, - output_prefix, - custom_class_order=None, - kwargs_dict=None, - ): - """ - Plot prediction results of evaluating the fine-tuned model. 
- - **Parameters** - - predictions_file : path - | Path of model predictions output to plot - | (saved output from self.validate if predict_eval=True) - | (or saved output from self.evaluate_saved_model) - id_class_dict_file : Path - | Path to _id_class_dict.pkl previously prepared by Classifier.prepare_data - | (dictionary of format: numerical IDs: class_labels) - title : str - | Title for legend containing class labels. - output_directory : Path - | Path to directory where plots will be saved - output_prefix : str - | Prefix for output file - custom_class_order : None, list - | List of classes in custom order for plots. - | Same order will be used for all models. - kwargs_dict : None, dict - | Dictionary of kwargs to pass to plotting function. - """ - # load predictions - with open(predictions_file, "rb") as f: - predictions = pickle.load(f) - - # load numerical id to class dictionary (id:class) - with open(id_class_dict_file, "rb") as f: - id_class_dict = pickle.load(f) - - if isinstance(predictions, dict): - if all( - [ - key in predictions.keys() - for key in ["pred_ids", "label_ids", "predictions"] - ] - ): - # format is output from self.evaluate_saved_model - predictions_logits = np.array(predictions["predictions"]) - true_ids = predictions["label_ids"] - else: - # format is output from self.validate if predict_eval=True - predictions_logits = predictions.predictions - true_ids = predictions.label_ids - - num_classes = len(id_class_dict.keys()) - num_predict_classes = predictions_logits.shape[1] - assert num_classes == num_predict_classes - classes = id_class_dict.values() - true_labels = [id_class_dict[idx] for idx in true_ids] - predictions_df = pd.DataFrame(predictions_logits, columns=classes) - if custom_class_order is not None: - predictions_df = predictions_df.reindex(columns=custom_class_order) - predictions_df["true"] = true_labels - custom_dict = dict(zip(classes, [i for i in range(len(classes))])) - if custom_class_order is not None: - custom_dict 
= dict( - zip(custom_class_order, [i for i in range(len(custom_class_order))]) - ) - predictions_df = predictions_df.sort_values( - by=["true"], key=lambda x: x.map(custom_dict) - ) - - eu.plot_predictions( - predictions_df, title, output_directory, output_prefix, kwargs_dict - ) diff --git a/geneformer/classifier_utils.py b/geneformer/classifier_utils.py deleted file mode 100644 index d2da349a731bbeb4dc023b48a6bd283c7381e236..0000000000000000000000000000000000000000 --- a/geneformer/classifier_utils.py +++ /dev/null @@ -1,648 +0,0 @@ -import json -import logging -import os -import random -from collections import Counter, defaultdict - -import numpy as np -import pandas as pd -from scipy.stats import chisquare, ranksums -from sklearn.metrics import accuracy_score, f1_score -from sklearn.model_selection import StratifiedKFold, train_test_split - -from . import perturber_utils as pu - -logger = logging.getLogger(__name__) - - -def downsample_and_shuffle(data, max_ncells, max_ncells_per_class, cell_state_dict): - data = data.shuffle(seed=42) - num_cells = len(data) - # if max number of cells is defined, then subsample to this max number - if max_ncells is not None: - if num_cells > max_ncells: - data = data.select([i for i in range(max_ncells)]) - if max_ncells_per_class is not None: - class_labels = data[cell_state_dict["state_key"]] - random.seed(42) - subsample_indices = subsample_by_class(class_labels, max_ncells_per_class) - data = data.select(subsample_indices) - return data - - -# subsample labels to maximum number N per class and return indices -def subsample_by_class(labels, N): - label_indices = defaultdict(list) - # Gather indices for each label - for idx, label in enumerate(labels): - label_indices[label].append(idx) - selected_indices = [] - # Select up to N indices for each label - for label, indices in label_indices.items(): - if len(indices) > N: - selected_indices.extend(random.sample(indices, N)) - else: - selected_indices.extend(indices) - return 
selected_indices - - -def rename_cols(data, state_key): - data = data.rename_column(state_key, "label") - return data - - -def validate_and_clean_cols(train_data, eval_data, classifier): - # validate that data has expected label column and remove others - if classifier == "cell": - label_col = "label" - elif classifier == "gene": - label_col = "labels" - - cols_to_keep = [label_col] + ["input_ids", "length"] - if label_col not in train_data.column_names: - logger.error(f"train_data must contain column {label_col} with class labels.") - raise - else: - train_data = remove_cols(train_data, cols_to_keep) - - if eval_data is not None: - if label_col not in eval_data.column_names: - logger.error( - f"eval_data must contain column {label_col} with class labels." - ) - raise - else: - eval_data = remove_cols(eval_data, cols_to_keep) - return train_data, eval_data - - -def remove_cols(data, cols_to_keep): - other_cols = list(data.features.keys()) - other_cols = [ele for ele in other_cols if ele not in cols_to_keep] - data = data.remove_columns(other_cols) - return data - - -def remove_rare(data, rare_threshold, label, nproc): - if rare_threshold > 0: - total_cells = len(data) - label_counter = Counter(data[label]) - nonrare_label_dict = { - label: [k for k, v in label_counter if (v / total_cells) > rare_threshold] - } - data = pu.filter_by_dict(data, nonrare_label_dict, nproc) - return data - - -def label_classes(classifier, data, gene_class_dict, nproc): - if classifier == "cell": - label_set = set(data["label"]) - elif classifier == "gene": - # remove cells without any of the target genes - def if_contains_label(example): - a = pu.flatten_list(gene_class_dict.values()) - b = example["input_ids"] - return not set(a).isdisjoint(b) - - data = data.filter(if_contains_label, num_proc=nproc) - label_set = gene_class_dict.keys() - - if len(data) == 0: - logger.error( - "No cells remain after filtering for target genes. Check target gene list." 
- ) - raise - - class_id_dict = dict(zip(label_set, [i for i in range(len(label_set))])) - id_class_dict = {v: k for k, v in class_id_dict.items()} - - def classes_to_ids(example): - if classifier == "cell": - example["label"] = class_id_dict[example["label"]] - elif classifier == "gene": - example["labels"] = label_gene_classes( - example, class_id_dict, gene_class_dict - ) - return example - - data = data.map(classes_to_ids, num_proc=nproc) - return data, id_class_dict - - -def label_gene_classes(example, class_id_dict, gene_class_dict): - return [ - class_id_dict.get(gene_class_dict.get(token_id, -100), -100) - for token_id in example["input_ids"] - ] - - -def prep_gene_classifier_train_eval_split( - data, - targets, - labels, - train_index, - eval_index, - max_ncells, - iteration_num, - num_proc, - balance=False, -): - # generate cross-validation splits - train_data = prep_gene_classifier_split( - data, - targets, - labels, - train_index, - "train", - max_ncells, - iteration_num, - num_proc, - balance, - ) - eval_data = prep_gene_classifier_split( - data, - targets, - labels, - eval_index, - "eval", - max_ncells, - iteration_num, - num_proc, - balance, - ) - return train_data, eval_data - - -def prep_gene_classifier_split( - data, - targets, - labels, - index, - subset_name, - max_ncells, - iteration_num, - num_proc, - balance=False, -): - # generate cross-validation splits - targets = np.array(targets) - labels = np.array(labels) - targets_subset = targets[index] - labels_subset = labels[index] - label_dict_subset = dict(zip(targets_subset, labels_subset)) - - # function to filter by whether contains train or eval labels - def if_contains_subset_label(example): - a = targets_subset - b = example["input_ids"] - return not set(a).isdisjoint(b) - - # filter dataset for examples containing classes for this split - logger.info(f"Filtering data for {subset_name} genes in split {iteration_num}") - subset_data = data.filter(if_contains_subset_label, num_proc=num_proc) 
- logger.info( - f"Filtered {round((1-len(subset_data)/len(data))*100)}%; {len(subset_data)} remain\n" - ) - - # balance gene subsets if train - if (subset_name == "train") and (balance is True): - subset_data, label_dict_subset = balance_gene_split( - subset_data, label_dict_subset, num_proc - ) - - # subsample to max_ncells - subset_data = downsample_and_shuffle(subset_data, max_ncells, None, None) - - # relabel genes for this split - def subset_classes_to_ids(example): - example["labels"] = [ - label_dict_subset.get(token_id, -100) for token_id in example["input_ids"] - ] - return example - - subset_data = subset_data.map(subset_classes_to_ids, num_proc=num_proc) - - return subset_data - - -def prep_gene_classifier_all_data( - data, targets, labels, max_ncells, num_proc, balance=False -): - targets = np.array(targets) - labels = np.array(labels) - label_dict_train = dict(zip(targets, labels)) - - # function to filter by whether contains train labels - def if_contains_train_label(example): - a = targets - b = example["input_ids"] - return not set(a).isdisjoint(b) - - # filter dataset for examples containing classes for this split - logger.info("Filtering training data for genes to classify.") - train_data = data.filter(if_contains_train_label, num_proc=num_proc) - logger.info( - f"Filtered {round((1-len(train_data)/len(data))*100)}%; {len(train_data)} remain\n" - ) - - if balance is True: - train_data, label_dict_train = balance_gene_split( - train_data, label_dict_train, num_proc - ) - - # subsample to max_ncells - train_data = downsample_and_shuffle(train_data, max_ncells, None, None) - - # relabel genes for this split - def train_classes_to_ids(example): - example["labels"] = [ - label_dict_train.get(token_id, -100) for token_id in example["input_ids"] - ] - return example - - train_data = train_data.map(train_classes_to_ids, num_proc=num_proc) - - return train_data - - -def balance_gene_split(subset_data, label_dict_subset, num_proc): - # count occurrence of 
genes in each label category - label0_counts, label1_counts = count_genes_for_balancing( - subset_data, label_dict_subset, num_proc - ) - label_ratio_0to1 = label0_counts / label1_counts - - if 8 / 10 <= label_ratio_0to1 <= 10 / 8: - # gene sets already balanced - logger.info( - "Gene sets were already balanced within 0.8-1.25 fold and did not require balancing.\n" - ) - return subset_data, label_dict_subset - else: - label_ratio_0to1_orig = label_ratio_0to1 + 0 - label_dict_subset_orig = label_dict_subset.copy() - # balance gene sets - max_ntrials = 25 - boost = 1 - if label_ratio_0to1 > 10 / 8: - # downsample label 0 - for i in range(max_ntrials): - label0 = 0 - label0_genes = [k for k, v in label_dict_subset.items() if v == label0] - label0_ngenes = len(label0_genes) - label0_nremove = max( - 1, - int( - np.floor( - label0_ngenes - label0_ngenes / (label_ratio_0to1 * boost) - ) - ), - ) - random.seed(i) - label0_remove_genes = random.sample(label0_genes, label0_nremove) - label_dict_subset_new = { - k: v - for k, v in label_dict_subset.items() - if k not in label0_remove_genes - } - label0_counts, label1_counts = count_genes_for_balancing( - subset_data, label_dict_subset_new, num_proc - ) - label_ratio_0to1 = label0_counts / label1_counts - if 8 / 10 <= label_ratio_0to1 <= 10 / 8: - # if gene sets now balanced, return new filtered data and new label_dict_subset - return filter_data_balanced_genes( - subset_data, label_dict_subset_new, num_proc - ) - elif label_ratio_0to1 > 10 / 8: - boost = boost * 1.1 - elif label_ratio_0to1 < 8 / 10: - boost = boost * 0.9 - else: - # downsample label 1 - for i in range(max_ntrials): - label1 = 1 - label1_genes = [k for k, v in label_dict_subset.items() if v == label1] - label1_ngenes = len(label1_genes) - label1_nremove = max( - 1, - int( - np.floor( - label1_ngenes - - label1_ngenes / ((1 / label_ratio_0to1) * boost) - ) - ), - ) - random.seed(i) - label1_remove_genes = random.sample(label1_genes, label1_nremove) - 
label_dict_subset_new = { - k: v - for k, v in label_dict_subset.items() - if k not in label1_remove_genes - } - label0_counts, label1_counts = count_genes_for_balancing( - subset_data, label_dict_subset_new, num_proc - ) - label_ratio_0to1 = label0_counts / label1_counts - if 8 / 10 <= label_ratio_0to1 <= 10 / 8: - # if gene sets now balanced, return new filtered data and new label_dict_subset - return filter_data_balanced_genes( - subset_data, label_dict_subset_new, num_proc - ) - elif label_ratio_0to1 < 8 / 10: - boost = boost * 1.1 - elif label_ratio_0to1 > 10 / 8: - boost = boost * 0.9 - - assert i + 1 == max_ntrials - if (label_ratio_0to1 <= label_ratio_0to1_orig < 8 / 10) or ( - 10 / 8 > label_ratio_0to1_orig >= label_ratio_0to1 - ): - label_ratio_0to1 = label_ratio_0to1_orig - label_dict_subset_new = label_dict_subset_orig - logger.warning( - f"Gene sets were not able to be balanced within 0.8-1.25 fold after {max_ntrials} trials. Imbalance level: {label_ratio_0to1}\n" - ) - return filter_data_balanced_genes(subset_data, label_dict_subset_new, num_proc) - - -def count_genes_for_balancing(subset_data, label_dict_subset, num_proc): - def count_targets(example): - labels = [ - label_dict_subset.get(token_id, -100) for token_id in example["input_ids"] - ] - counter_labels = Counter(labels) - # get count of labels 0 or 1, or if absent, return 0 - example["labels_counts"] = [counter_labels.get(0, 0), counter_labels.get(1, 0)] - return example - - subset_data = subset_data.map(count_targets, num_proc=num_proc) - - label0_counts = sum([counts[0] for counts in subset_data["labels_counts"]]) - label1_counts = sum([counts[1] for counts in subset_data["labels_counts"]]) - - subset_data = subset_data.remove_columns("labels_counts") - - return label0_counts, label1_counts - - -def filter_data_balanced_genes(subset_data, label_dict_subset, num_proc): - # function to filter by whether contains labels - def if_contains_subset_label(example): - a = 
list(label_dict_subset.keys()) - b = example["input_ids"] - return not set(a).isdisjoint(b) - - # filter dataset for examples containing classes for this split - logger.info("Filtering data for balanced genes") - subset_data_len_orig = len(subset_data) - subset_data = subset_data.filter(if_contains_subset_label, num_proc=num_proc) - logger.info( - f"Filtered {round((1-len(subset_data)/subset_data_len_orig)*100)}%; {len(subset_data)} remain\n" - ) - - return subset_data, label_dict_subset - - -def balance_attr_splits( - data, - attr_to_split, - attr_to_balance, - eval_size, - max_trials, - pval_threshold, - state_key, - nproc, -): - metadata_df = pd.DataFrame({"split_attr_ids": data[attr_to_split]}) - for attr in attr_to_balance: - if attr == state_key: - metadata_df[attr] = data["label"] - else: - metadata_df[attr] = data[attr] - metadata_df = metadata_df.drop_duplicates() - - split_attr_ids = list(metadata_df["split_attr_ids"]) - assert len(split_attr_ids) == len(set(split_attr_ids)) - eval_num = round(len(split_attr_ids) * eval_size) - colnames = ( - ["trial_num", "train_ids", "eval_ids"] - + pu.flatten_list( - [ - [ - f"{attr}_train_mean_or_counts", - f"{attr}_eval_mean_or_counts", - f"{attr}_pval", - ] - for attr in attr_to_balance - ] - ) - + ["mean_pval"] - ) - balance_df = pd.DataFrame(columns=colnames) - data_dict = dict() - trial_num = 1 - for i in range(max_trials): - if not all( - count > 1 for count in list(Counter(metadata_df[state_key]).values()) - ): - logger.error( - f"Cannot balance by {attr_to_split} while retaining at least 1 occurrence of each {state_key} class in both data splits. 
" - ) - raise - eval_base = [] - for state in set(metadata_df[state_key]): - eval_base += list( - metadata_df.loc[ - metadata_df[state_key][metadata_df[state_key].eq(state)] - .sample(1, random_state=i) - .index - ]["split_attr_ids"] - ) - non_eval_base = [idx for idx in split_attr_ids if idx not in eval_base] - random.seed(i) - eval_ids = random.sample(non_eval_base, eval_num - len(eval_base)) + eval_base - train_ids = [idx for idx in split_attr_ids if idx not in eval_ids] - df_vals = [trial_num, train_ids, eval_ids] - pvals = [] - for attr in attr_to_balance: - train_attr = list( - metadata_df[metadata_df["split_attr_ids"].isin(train_ids)][attr] - ) - eval_attr = list( - metadata_df[metadata_df["split_attr_ids"].isin(eval_ids)][attr] - ) - if attr == state_key: - # ensure IDs are interpreted as categorical - train_attr = [str(item) for item in train_attr] - eval_attr = [str(item) for item in eval_attr] - if all(isinstance(item, (int, float)) for item in train_attr + eval_attr): - train_attr_mean = np.nanmean(train_attr) - eval_attr_mean = np.nanmean(eval_attr) - pval = ranksums(train_attr, eval_attr, nan_policy="omit").pvalue - df_vals += [train_attr_mean, eval_attr_mean, pval] - elif all(isinstance(item, (str)) for item in train_attr + eval_attr): - obs_counts = Counter(train_attr) - exp_counts = Counter(eval_attr) - all_categ = set(obs_counts.keys()).union(set(exp_counts.keys())) - obs = [obs_counts[cat] for cat in all_categ] - exp = [ - exp_counts[cat] * sum(obs) / sum(exp_counts.values()) - for cat in all_categ - ] - pval = chisquare(f_obs=obs, f_exp=exp).pvalue - train_attr_counts = str(obs_counts).strip("Counter(").strip(")") - eval_attr_counts = str(exp_counts).strip("Counter(").strip(")") - df_vals += [train_attr_counts, eval_attr_counts, pval] - else: - logger.error( - f"Inconsistent data types in attribute {attr}. " - "Cannot infer if continuous or categorical. " - "Must be all numeric (continuous) or all strings (categorical) to balance." 
- ) - raise - pvals += [pval] - - df_vals += [np.nanmean(pvals)] - balance_df_i = pd.DataFrame(df_vals, index=colnames).T - balance_df = pd.concat([balance_df, balance_df_i], ignore_index=True) - valid_pvals = [ - pval_i - for pval_i in pvals - if isinstance(pval_i, (int, float)) and not np.isnan(pval_i) - ] - if all(i >= pval_threshold for i in valid_pvals): - data_dict["train"] = pu.filter_by_dict( - data, {attr_to_split: balance_df_i["train_ids"][0]}, nproc - ) - data_dict["test"] = pu.filter_by_dict( - data, {attr_to_split: balance_df_i["eval_ids"][0]}, nproc - ) - return data_dict, balance_df - trial_num = trial_num + 1 - balance_max_df = balance_df.iloc[balance_df["mean_pval"].idxmax(), :] - data_dict["train"] = pu.filter_by_dict( - data, {attr_to_split: balance_df_i["train_ids"][0]}, nproc - ) - data_dict["test"] = pu.filter_by_dict( - data, {attr_to_split: balance_df_i["eval_ids"][0]}, nproc - ) - logger.warning( - f"No splits found without significant difference in attr_to_balance among {max_trials} trials. " - f"Selecting optimal split (trial #{balance_max_df['trial_num']}) from completed trials." 
- ) - return data_dict, balance_df - - -def get_num_classes(id_class_dict): - return len(set(id_class_dict.values())) - - -def compute_metrics(pred): - labels = pred.label_ids - preds = pred.predictions.argmax(-1) - - # calculate accuracy and macro f1 using sklearn's function - if len(labels.shape) == 1: - acc = accuracy_score(labels, preds) - macro_f1 = f1_score(labels, preds, average="macro") - else: - flat_labels = labels.flatten().tolist() - flat_preds = preds.flatten().tolist() - logit_label_paired = [ - item for item in list(zip(flat_preds, flat_labels)) if item[1] != -100 - ] - y_pred = [item[0] for item in logit_label_paired] - y_true = [item[1] for item in logit_label_paired] - - acc = accuracy_score(y_true, y_pred) - macro_f1 = f1_score(y_true, y_pred, average="macro") - - return {"accuracy": acc, "macro_f1": macro_f1} - - -def get_default_train_args(model, classifier, data, output_dir): - num_layers = pu.quant_layers(model) - freeze_layers = 0 - batch_size = 12 - if classifier == "cell": - epochs = 10 - evaluation_strategy = "epoch" - load_best_model_at_end = True - else: - epochs = 1 - evaluation_strategy = "no" - load_best_model_at_end = False - - if num_layers == 6: - default_training_args = { - "learning_rate": 5e-5, - "lr_scheduler_type": "linear", - "warmup_steps": 500, - "per_device_train_batch_size": batch_size, - "per_device_eval_batch_size": batch_size, - } - else: - default_training_args = { - "per_device_train_batch_size": batch_size, - "per_device_eval_batch_size": batch_size, - } - - training_args = { - "num_train_epochs": epochs, - "do_train": True, - "do_eval": True, - "evaluation_strategy": evaluation_strategy, - "logging_steps": np.floor(len(data) / batch_size / 8), # 8 evals per epoch - "save_strategy": "epoch", - "group_by_length": False, - "length_column_name": "length", - "disable_tqdm": False, - "weight_decay": 0.001, - "load_best_model_at_end": load_best_model_at_end, - } - training_args.update(default_training_args) - - return 
training_args, freeze_layers - - -def load_best_model(directory, model_type, num_classes, mode="eval"): - file_dict = dict() - for subdir, dirs, files in os.walk(directory): - for file in files: - if file.endswith("result.json"): - with open(f"{subdir}/{file}", "rb") as fp: - result_json = json.load(fp) - file_dict[f"{subdir}"] = result_json["eval_macro_f1"] - file_df = pd.DataFrame( - {"dir": file_dict.keys(), "eval_macro_f1": file_dict.values()} - ) - model_superdir = ( - "run-" - + file_df.iloc[file_df["eval_macro_f1"].idxmax()]["dir"] - .split("_objective_")[2] - .split("_")[0] - ) - - for subdir, dirs, files in os.walk(f"{directory}/{model_superdir}"): - for file in files: - if file.endswith("model.safetensors"): - model = pu.load_model(model_type, num_classes, f"{subdir}", mode) - return model - - -class StratifiedKFold3(StratifiedKFold): - def split(self, targets, labels, test_ratio=0.5, groups=None): - s = super().split(targets, labels, groups) - for train_indxs, test_indxs in s: - if test_ratio == 0: - yield train_indxs, test_indxs, None - else: - labels_test = np.array(labels)[test_indxs] - valid_indxs, test_indxs = train_test_split( - test_indxs, - stratify=labels_test, - test_size=test_ratio, - random_state=0, - ) - yield train_indxs, valid_indxs, test_indxs diff --git a/geneformer/collator_for_classification.py b/geneformer/collator_for_classification.py index 297fa666dbf0daeaa94e2ca203ace5f98570a30e..42cee08ffa5e225de34f20c9885438f72675cedb 100644 --- a/geneformer/collator_for_classification.py +++ b/geneformer/collator_for_classification.py @@ -1,22 +1,24 @@ """ Geneformer collator for gene and cell classification. + Huggingface data collator modified to accommodate single-cell transcriptomics data for gene and cell classification. 
""" - +import numpy as np +import torch import warnings from enum import Enum from typing import Dict, List, Optional, Union -import numpy as np -import torch from transformers import ( - BatchEncoding, DataCollatorForTokenClassification, SpecialTokensMixin, + BatchEncoding, ) from transformers.utils import is_tf_available, is_torch_available, logging, to_py_obj from transformers.utils.generic import _is_tensorflow, _is_torch +from .pretrainer import token_dictionary + EncodedInput = List[int] logger = logging.get_logger(__name__) VERY_LARGE_INTEGER = int( @@ -28,7 +30,6 @@ LARGE_INTEGER = int( # precollator functions - class ExplicitEnum(Enum): """ Enum with more explicit error message for missing values. @@ -41,7 +42,6 @@ class ExplicitEnum(Enum): % (value, cls.__name__, str(list(cls._value2member_map_.keys()))) ) - class TruncationStrategy(ExplicitEnum): """ Possible values for the ``truncation`` argument in :meth:`PreTrainedTokenizerBase.__call__`. Useful for @@ -54,6 +54,7 @@ class TruncationStrategy(ExplicitEnum): DO_NOT_TRUNCATE = "do_not_truncate" + class PaddingStrategy(ExplicitEnum): """ Possible values for the ``padding`` argument in :meth:`PreTrainedTokenizerBase.__call__`. Useful for tab-completion @@ -65,6 +66,7 @@ class PaddingStrategy(ExplicitEnum): DO_NOT_PAD = "do_not_pad" + class TensorType(ExplicitEnum): """ Possible values for the ``return_tensors`` argument in :meth:`PreTrainedTokenizerBase.__call__`. 
Useful for @@ -76,41 +78,21 @@ class TensorType(ExplicitEnum): NUMPY = "np" JAX = "jax" - + class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): - def __init__(self, *args, **kwargs) -> None: - super().__init__(mask_token="", pad_token="") - - self.token_dictionary = kwargs.get("token_dictionary") - self.padding_side = "right" - self.model_input_names = ["input_ids"] - self._mask_token_id = self.token_dictionary.get("") - self._pad_token_id = self.token_dictionary.get("") - self._all_special_ids = [ - self.token_dictionary.get(""), - self.token_dictionary.get(""), - ] - - @property - def all_special_ids(self): - return self._all_special_ids - - @property - def mask_token_id(self): - return self._mask_token_id - - @property - def pad_token_id(self): - return self._pad_token_id + mask_token = "" + mask_token_id = token_dictionary.get("") + pad_token = "" + pad_token_id = token_dictionary.get("") + padding_side = "right" + all_special_ids = [ + token_dictionary.get(""), + token_dictionary.get("") + ] + model_input_names = ["input_ids"] def _get_padding_truncation_strategies( - self, - padding=True, - truncation=False, - max_length=None, - pad_to_multiple_of=None, - verbose=True, - **kwargs, + self, padding=True, truncation=False, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs ): """ Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy @@ -123,9 +105,7 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): # If you only set max_length, it activates truncation for max_length if max_length is not None and padding is False and truncation is False: if verbose: - if not self.deprecation_warnings.get( - "Truncation-not-explicitly-activated", False - ): + if not self.deprecation_warnings.get("Truncation-not-explicitly-activated", False): logger.warning( "Truncation was not explicitly activated but `max_length` is provided a specific value, " "please use `truncation=True` 
to explicitly truncate examples to max length. " @@ -153,9 +133,7 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): padding_strategy = PaddingStrategy.MAX_LENGTH elif padding is not False: if padding is True: - padding_strategy = ( - PaddingStrategy.LONGEST - ) # Default to pad to the longest sequence in the batch + padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(padding, PaddingStrategy): padding_strategy = PaddingStrategy(padding) elif isinstance(padding, PaddingStrategy): @@ -195,9 +173,7 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): if padding_strategy == PaddingStrategy.MAX_LENGTH: if self.model_max_length > LARGE_INTEGER: if verbose: - if not self.deprecation_warnings.get( - "Asking-to-pad-to-max_length", False - ): + if not self.deprecation_warnings.get("Asking-to-pad-to-max_length", False): logger.warning( "Asking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. " "Default to no padding." @@ -210,24 +186,18 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE: if self.model_max_length > LARGE_INTEGER: if verbose: - if not self.deprecation_warnings.get( - "Asking-to-truncate-to-max_length", False - ): + if not self.deprecation_warnings.get("Asking-to-truncate-to-max_length", False): logger.warning( "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. " "Default to no truncation." 
) - self.deprecation_warnings[ - "Asking-to-truncate-to-max_length" - ] = True + self.deprecation_warnings["Asking-to-truncate-to-max_length"] = True truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE else: max_length = self.model_max_length # Test if we have a padding token - if padding_strategy != PaddingStrategy.DO_NOT_PAD and ( - not self.pad_token or self.pad_token_id < 0 - ): + if padding_strategy != PaddingStrategy.DO_NOT_PAD and (not self.pad_token or self.pad_token_id < 0): raise ValueError( "Asking to pad but the tokenizer does not have a padding token. " "Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` " @@ -258,7 +228,7 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): Dict[str, List[EncodedInput]], List[Dict[str, EncodedInput]], ], - class_type, # options: "gene" or "cell" + class_type, # options: "gene" or "cell" padding: Union[bool, str, PaddingStrategy] = True, max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, @@ -269,23 +239,29 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): """ Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length in the batch. + Padding side (left/right) padding token ids are defined at the tokenizer level (with ``self.padding_side``, ``self.pad_token_id`` and ``self.pad_token_type_id``) + .. note:: + If the ``encoded_inputs`` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the result will use the same type unless you provide a different tensor type with ``return_tensors``. In the case of PyTorch tensors, you will lose the specific device of your tensors however. + Args: encoded_inputs (:class:`~transformers.BatchEncoding`, list of :class:`~transformers.BatchEncoding`, :obj:`Dict[str, List[int]]`, :obj:`Dict[str, List[List[int]]` or :obj:`List[Dict[str, List[int]]]`): Tokenized inputs. 
Can represent one input (:class:`~transformers.BatchEncoding` or :obj:`Dict[str, List[int]]`) or a batch of tokenized inputs (list of :class:`~transformers.BatchEncoding`, `Dict[str, List[List[int]]]` or `List[Dict[str, List[int]]]`) so you can use this method during preprocessing as well as in a PyTorch Dataloader collate function. + Instead of :obj:`List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see the note above for the return type. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: + * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the @@ -296,14 +272,17 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). return_attention_mask (:obj:`bool`, `optional`): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute. + `What are attention masks? <../glossary.html#attention-mask>`__ return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`): If set, will return tensors instead of list of python integers. Acceptable values are: + * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. 
* :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. @@ -312,13 +291,8 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): """ # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader - if isinstance(encoded_inputs, (list, tuple)) and isinstance( - encoded_inputs[0], (dict, BatchEncoding) - ): - encoded_inputs = { - key: [example[key] for example in encoded_inputs] - for key in encoded_inputs[0].keys() - } + if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], (dict, BatchEncoding)): + encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()} # The model's main input name, usually `input_ids`, has be passed for padding if self.model_input_names[0] not in encoded_inputs: @@ -412,7 +386,7 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], - class_type, # options: "gene" or "cell" + class_type, # options: "gene" or "cell" max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.LONGEST, pad_to_multiple_of: Optional[int] = None, @@ -420,15 +394,18 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) + Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. 
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: + - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. @@ -445,73 +422,46 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) - if ( - max_length is not None - and pad_to_multiple_of is not None - and (max_length % pad_to_multiple_of != 0) - ): + if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of - needs_to_be_padded = ( - padding_strategy != PaddingStrategy.DO_NOT_PAD - and len(required_input) != max_length - ) + needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length if needs_to_be_padded: difference = max_length - len(required_input) if self.padding_side == "right": if return_attention_mask: - encoded_inputs["attention_mask"] = [1] * len(required_input) + [ - 0 - ] * difference + encoded_inputs["attention_mask"] = [1] * len(required_input) + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( - encoded_inputs["token_type_ids"] - + [self.pad_token_type_id] * difference + encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "special_tokens_mask" in encoded_inputs: - encoded_inputs["special_tokens_mask"] = ( - encoded_inputs["special_tokens_mask"] + [1] * difference - ) - encoded_inputs[self.model_input_names[0]] = ( - required_input + [self.pad_token_id] * difference - ) + encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * 
difference + encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference if class_type == "gene": - encoded_inputs["labels"] = ( - encoded_inputs["labels"] + [-100] * difference - ) + encoded_inputs["labels"] = encoded_inputs["labels"] + [-100] * difference elif self.padding_side == "left": if return_attention_mask: - encoded_inputs["attention_mask"] = [0] * difference + [1] * len( - required_input - ) + encoded_inputs["attention_mask"] = [0] * difference + [1] * len(required_input) if "token_type_ids" in encoded_inputs: - encoded_inputs["token_type_ids"] = [ - self.pad_token_type_id - ] * difference + encoded_inputs["token_type_ids"] + encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ + "token_type_ids" + ] if "special_tokens_mask" in encoded_inputs: - encoded_inputs["special_tokens_mask"] = [ - 1 - ] * difference + encoded_inputs["special_tokens_mask"] - encoded_inputs[self.model_input_names[0]] = [ - self.pad_token_id - ] * difference + required_input + encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] + encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input if class_type == "gene": - encoded_inputs["labels"] = [-100] * difference + encoded_inputs[ - "labels" - ] + encoded_inputs["labels"] = [-100] * difference + encoded_inputs["labels"] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) elif return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) - + return encoded_inputs def get_special_tokens_mask( - self, - token_ids_0: List[int], - token_ids_1: Optional[List[int]] = None, - already_has_special_tokens: bool = False, + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no 
special tokens added. This method is called when adding @@ -535,15 +485,11 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): all_special_ids = self.all_special_ids # cache the property - special_tokens_mask = [ - 1 if token in all_special_ids else 0 for token in token_ids_0 - ] + special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0] return special_tokens_mask - def convert_tokens_to_ids( - self, tokens: Union[str, List[str]] - ) -> Union[int, List[int]]: + def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]: """ Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the vocabulary. @@ -567,15 +513,14 @@ class PrecollatorForGeneAndCellClassification(SpecialTokensMixin): if token is None: return None - return self.token_dictionary.get(token) + return token_dictionary.get(token) def __len__(self): - return len(self.token_dictionary) + return len(token_dictionary) # collator functions - class DataCollatorForGeneClassification(DataCollatorForTokenClassification): """ Data collator that will dynamically pad the inputs received, as well as the labels. @@ -601,33 +546,25 @@ class DataCollatorForGeneClassification(DataCollatorForTokenClassification): The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions). 
""" + tokenizer = PrecollatorForGeneAndCellClassification() class_type = "gene" padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None label_pad_token_id: int = -100 - + def __init__(self, *args, **kwargs) -> None: - self.token_dictionary = kwargs.pop("token_dictionary") super().__init__( - tokenizer=PrecollatorForGeneAndCellClassification( - token_dictionary=self.token_dictionary - ), + tokenizer=self.tokenizer, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, label_pad_token_id=self.label_pad_token_id, - *args, - **kwargs, - ) + *args, **kwargs) def _prepare_batch(self, features): label_name = "label" if "label" in features[0].keys() else "labels" - labels = ( - [feature[label_name] for feature in features] - if label_name in features[0].keys() - else None - ) + labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None batch = self.tokenizer.pad( features, class_type=self.class_type, @@ -637,31 +574,29 @@ class DataCollatorForGeneClassification(DataCollatorForTokenClassification): return_tensors="pt", ) return batch - + def __call__(self, features): batch = self._prepare_batch(features) batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()} return batch - + class DataCollatorForCellClassification(DataCollatorForGeneClassification): + class_type = "cell" def _prepare_batch(self, features): + batch = super()._prepare_batch(features) - + # Special handling for labels. # Ensure that tensor is created with the correct type # (it should be automatically the case, but let's make sure of it.) 
first = features[0] if "label" in first and first["label"] is not None: - label = ( - first["label"].item() - if isinstance(first["label"], torch.Tensor) - else first["label"] - ) + label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"] dtype = torch.long if isinstance(label, int) else torch.float batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype) - + return batch diff --git a/geneformer/emb_extractor.py b/geneformer/emb_extractor.py index 90a01405d6af4f100df1c9dfa5f18f0474c65f57..bc0ed94742e15c1dcc2a8d5a051d478814fa7ef2 100644 --- a/geneformer/emb_extractor.py +++ b/geneformer/emb_extractor.py @@ -1,419 +1,253 @@ """ Geneformer embedding extractor. -**Description:** - -| Extracts gene or cell embeddings. -| Plots cell embeddings as heatmaps or UMAPs. -| Generates cell state embedding dictionary for use with InSilicoPerturber. - +Usage: + from geneformer import EmbExtractor + embex = EmbExtractor(model_type="CellClassifier", + num_classes=3, + emb_mode="cell", + cell_emb_style="mean_pool", + filter_data={"cell_type":["cardiomyocyte"]}, + max_ncells=1000, + max_ncells_to_plot=1000, + emb_layer=-1, + emb_label=["disease","cell_type"], + labels_to_plot=["disease","cell_type"], + forward_batch_size=100, + nproc=16, + summary_stat=None) + embs = embex.extract_embs("path/to/model", + "path/to/input_data", + "path/to/output_directory", + "output_prefix") + embex.plot_embs(embs=embs, + plot_style="heatmap", + output_directory="path/to/output_directory", + output_prefix="output_prefix") + """ # imports import logging -import pickle -from collections import Counter -from pathlib import Path - import anndata import matplotlib.pyplot as plt +import numpy as np import pandas as pd +import pickle +from tdigest import TDigest import scanpy as sc import seaborn as sns import torch -from tdigest import TDigest -from tqdm.auto import trange +from collections import Counter +from pathlib import Path +from tqdm.notebook 
import trange +from transformers import BertForMaskedLM, BertForTokenClassification, BertForSequenceClassification -from . import TOKEN_DICTIONARY_FILE -from . import perturber_utils as pu +from .tokenizer import TOKEN_DICTIONARY_FILE -logger = logging.getLogger(__name__) +from .in_silico_perturber import downsample_and_sort, \ + gen_attention_mask, \ + get_model_input_size, \ + load_and_filter, \ + load_model, \ + mean_nonpadding_embs, \ + pad_tensor_list, \ + quant_layers +logger = logging.getLogger(__name__) # extract embeddings -def get_embs( - model, - filtered_input_data, - emb_mode, - layer_to_quant, - pad_token_id, - forward_batch_size, - token_gene_dict, - special_token=False, - summary_stat=None, - silent=False, -): - model_input_size = pu.get_model_input_size(model) +def get_embs(model, + filtered_input_data, + emb_mode, + layer_to_quant, + pad_token_id, + forward_batch_size, + summary_stat): + + model_input_size = get_model_input_size(model) total_batch_length = len(filtered_input_data) - + if summary_stat is None: embs_list = [] elif summary_stat is not None: - # get # of emb dims - emb_dims = pu.get_model_emb_dims(model) - if emb_mode == "cell": - # initiate tdigests for # of emb dims - embs_tdigests = [TDigest() for _ in range(emb_dims)] - if emb_mode == "gene": - gene_set = list( - { - element - for sublist in filtered_input_data["input_ids"] - for element in sublist - } - ) - # initiate dict with genes as keys and tdigests for # of emb dims as values - embs_tdigests_dict = { - k: [TDigest() for _ in range(emb_dims)] for k in gene_set - } - - # Check if CLS and EOS token is present in the token dictionary - cls_present = any("" in value for value in token_gene_dict.values()) - eos_present = any("" in value for value in token_gene_dict.values()) - if emb_mode == "cls": - assert cls_present, " token missing in token dictionary" - # Check to make sure that the first token of the filtered input data is cls token - gene_token_dict = {v: k for k, v in 
token_gene_dict.items()} - cls_token_id = gene_token_dict[""] - assert ( - filtered_input_data["input_ids"][0][0] == cls_token_id - ), "First token is not token value" - elif emb_mode == "cell": - if cls_present: - logger.warning( - "CLS token present in token dictionary, excluding from average." - ) - if eos_present: - logger.warning( - "EOS token present in token dictionary, excluding from average." - ) - - overall_max_len = 0 + # test embedding extraction for example cell and extract # emb dims + example = filtered_input_data.select([i for i in range(1)]) + example.set_format(type="torch") + emb_dims = test_emb(model, example["input_ids"], layer_to_quant) + # initiate tdigests for # of emb dims + embs_tdigests = [TDigest() for _ in range(emb_dims)] - for i in trange(0, total_batch_length, forward_batch_size, leave=(not silent)): - max_range = min(i + forward_batch_size, total_batch_length) + for i in trange(0, total_batch_length, forward_batch_size): + max_range = min(i+forward_batch_size, total_batch_length) minibatch = filtered_input_data.select([i for i in range(i, max_range)]) - - max_len = int(max(minibatch["length"])) - original_lens = torch.tensor(minibatch["length"], device="cuda") + max_len = max(minibatch["length"]) + original_lens = torch.tensor(minibatch["length"]).to("cuda") minibatch.set_format(type="torch") input_data_minibatch = minibatch["input_ids"] - input_data_minibatch = pu.pad_tensor_list( - input_data_minibatch, max_len, pad_token_id, model_input_size - ) - + input_data_minibatch = pad_tensor_list(input_data_minibatch, + max_len, + pad_token_id, + model_input_size) + with torch.no_grad(): outputs = model( - input_ids=input_data_minibatch.to("cuda"), - attention_mask=pu.gen_attention_mask(minibatch), + input_ids = input_data_minibatch.to("cuda"), + attention_mask = gen_attention_mask(minibatch) ) embs_i = outputs.hidden_states[layer_to_quant] - + if emb_mode == "cell": - if cls_present: - non_cls_embs = embs_i[:, 1:, :] # Get all layers 
except the embs - if eos_present: - mean_embs = pu.mean_nonpadding_embs(non_cls_embs, original_lens - 2) - else: - mean_embs = pu.mean_nonpadding_embs(non_cls_embs, original_lens - 1) - else: - mean_embs = pu.mean_nonpadding_embs(embs_i, original_lens) + mean_embs = mean_nonpadding_embs(embs_i, original_lens) if summary_stat is None: - embs_list.append(mean_embs) + embs_list += [mean_embs] elif summary_stat is not None: # update tdigests with current batch for each emb dim - accumulate_tdigests(embs_tdigests, mean_embs, emb_dims) - del mean_embs - elif emb_mode == "gene": - if summary_stat is None: - embs_list.append(embs_i) - elif summary_stat is not None: - for h in trange(len(minibatch)): - length_h = minibatch[h]["length"] - input_ids_h = minibatch[h]["input_ids"][0:length_h] - - # double check dimensions before unsqueezing - embs_i_dim = embs_i.dim() - if embs_i_dim != 3: - logger.error( - f"Embedding tensor should have 3 dimensions, not {embs_i_dim}" - ) - raise - - embs_h = embs_i[h, :, :].unsqueeze(dim=1) - dict_h = dict(zip(input_ids_h, embs_h)) - for k in dict_h.keys(): - accumulate_tdigests( - embs_tdigests_dict[int(k)], dict_h[k], emb_dims - ) - del embs_h - del dict_h - elif emb_mode == "cls": - cls_embs = embs_i[:, 0, :].clone().detach() # CLS token layer - embs_list.append(cls_embs) - del cls_embs - - overall_max_len = max(overall_max_len, max_len) + # note: tdigest batch update known to be slow so updating serially + [embs_tdigests[j].update(mean_embs[i,j].item()) for i in range(mean_embs.size(0)) for j in range(emb_dims)] + del outputs del minibatch del input_data_minibatch del embs_i - - torch.cuda.empty_cache() - + del mean_embs + torch.cuda.empty_cache() + if summary_stat is None: - if (emb_mode == "cell") or (emb_mode == "cls"): - embs_stack = torch.cat(embs_list, dim=0) - elif emb_mode == "gene": - embs_stack = pu.pad_tensor_list( - embs_list, - overall_max_len, - pad_token_id, - model_input_size, - 1, - pu.pad_3d_tensor, - ) - + embs_stack = 
torch.cat(embs_list) # calculate summary stat embs from approximated tdigests elif summary_stat is not None: - if emb_mode == "cell": - if summary_stat == "mean": - summary_emb_list = tdigest_mean(embs_tdigests, emb_dims) - elif summary_stat == "median": - summary_emb_list = tdigest_median(embs_tdigests, emb_dims) - embs_stack = torch.tensor(summary_emb_list) - elif emb_mode == "gene": - if summary_stat == "mean": - [ - update_tdigest_dict_mean(embs_tdigests_dict, gene, emb_dims) - for gene in embs_tdigests_dict.keys() - ] - elif summary_stat == "median": - [ - update_tdigest_dict_median(embs_tdigests_dict, gene, emb_dims) - for gene in embs_tdigests_dict.keys() - ] - return embs_tdigests_dict + if summary_stat == "mean": + summary_emb_list = [embs_tdigests[i].trimmed_mean(0,100) for i in range(emb_dims)] + elif summary_stat == "median": + summary_emb_list = [embs_tdigests[i].percentile(50) for i in range(emb_dims)] + embs_stack = torch.tensor(summary_emb_list) return embs_stack +def test_emb(model, example, layer_to_quant): + with torch.no_grad(): + outputs = model( + input_ids = example.to("cuda") + ) -def accumulate_tdigests(embs_tdigests, mean_embs, emb_dims): - # note: tdigest batch update known to be slow so updating serially - [ - embs_tdigests[j].update(mean_embs[i, j].item()) - for i in range(mean_embs.size(0)) - for j in range(emb_dims) - ] - - -def update_tdigest_dict(embs_tdigests_dict, gene, gene_embs, emb_dims): - embs_tdigests_dict[gene] = accumulate_tdigests( - embs_tdigests_dict[gene], gene_embs, emb_dims - ) - - -def update_tdigest_dict_mean(embs_tdigests_dict, gene, emb_dims): - embs_tdigests_dict[gene] = tdigest_mean(embs_tdigests_dict[gene], emb_dims) - - -def update_tdigest_dict_median(embs_tdigests_dict, gene, emb_dims): - embs_tdigests_dict[gene] = tdigest_median(embs_tdigests_dict[gene], emb_dims) - - -def summarize_gene_embs(h, minibatch, embs_i, embs_tdigests_dict, emb_dims): - length_h = minibatch[h]["length"] - input_ids_h = 
minibatch[h]["input_ids"][0:length_h] - embs_h = embs_i[h, :, :].unsqueeze(dim=1) - dict_h = dict(zip(input_ids_h, embs_h)) - [ - update_tdigest_dict(embs_tdigests_dict, k, dict_h[k], emb_dims) - for k in dict_h.keys() - ] - - -def tdigest_mean(embs_tdigests, emb_dims): - return [embs_tdigests[i].trimmed_mean(0, 100) for i in range(emb_dims)] - - -def tdigest_median(embs_tdigests, emb_dims): - return [embs_tdigests[i].percentile(50) for i in range(emb_dims)] - + embs_test = outputs.hidden_states[layer_to_quant] + return embs_test.size()[2] -def label_cell_embs(embs, downsampled_data, emb_labels): - embs_df = pd.DataFrame(embs.cpu().numpy()) +def label_embs(embs, downsampled_data, emb_labels): + embs_df = pd.DataFrame(embs.cpu()) if emb_labels is not None: for label in emb_labels: emb_label = downsampled_data[label] embs_df[label] = emb_label return embs_df - -def label_gene_embs(embs, downsampled_data, token_gene_dict): - gene_set = { - element for sublist in downsampled_data["input_ids"] for element in sublist - } - gene_emb_dict = {k: [] for k in gene_set} - for i in range(embs.size()[0]): - length = downsampled_data[i]["length"] - dict_i = dict( - zip( - downsampled_data[i]["input_ids"][0:length], - embs[i, :, :].unsqueeze(dim=1), - ) - ) - for k in dict_i.keys(): - gene_emb_dict[k].append(dict_i[k]) - for k in gene_emb_dict.keys(): - gene_emb_dict[k] = ( - torch.squeeze(torch.mean(torch.stack(gene_emb_dict[k]), dim=0), dim=0) - .cpu() - .numpy() - ) - embs_df = pd.DataFrame(gene_emb_dict).T - embs_df.index = [token_gene_dict[token] for token in embs_df.index] - return embs_df - - -def plot_umap(embs_df, emb_dims, label, output_file, kwargs_dict, seed=0): - only_embs_df = embs_df.iloc[:, :emb_dims] +def plot_umap(embs_df, emb_dims, label, output_file, kwargs_dict): + only_embs_df = embs_df.iloc[:,:emb_dims] only_embs_df.index = pd.RangeIndex(0, only_embs_df.shape[0], name=None).astype(str) - only_embs_df.columns = pd.RangeIndex(0, only_embs_df.shape[1], 
name=None).astype( - str - ) + only_embs_df.columns = pd.RangeIndex(0, only_embs_df.shape[1], name=None).astype(str) vars_dict = {"embs": only_embs_df.columns} - obs_dict = {"cell_id": list(only_embs_df.index), f"{label}": list(embs_df[label])} + obs_dict = {"cell_id": list(only_embs_df.index), + f"{label}": list(embs_df[label])} adata = anndata.AnnData(X=only_embs_df, obs=obs_dict, var=vars_dict) - sc.tl.pca(adata, svd_solver="arpack") - sc.pp.neighbors(adata, random_state=seed) - sc.tl.umap(adata, random_state=seed) - sns.set(rc={"figure.figsize": (10, 10)}, font_scale=2.3) + sc.tl.pca(adata, svd_solver='arpack') + sc.pp.neighbors(adata) + sc.tl.umap(adata) + sns.set(rc={'figure.figsize':(10,10)}, font_scale=2.3) sns.set_style("white") - default_kwargs_dict = {"size": 200} + default_kwargs_dict = {"palette":"Set2", "size":200} if kwargs_dict is not None: default_kwargs_dict.update(kwargs_dict) - - cats = set(embs_df[label]) - - with plt.rc_context(): - ax = sc.pl.umap(adata, color=label, show=False, **default_kwargs_dict) - ax.legend( - markerscale=2, - frameon=False, - loc="center left", - bbox_to_anchor=(1, 0.5), - ncol=(1 if len(cats) <= 14 else 2 if len(cats) <= 30 else 3), - ) - plt.show() - plt.savefig(output_file, bbox_inches="tight") - + + sc.pl.umap(adata, color=label, save=output_file, **default_kwargs_dict) def gen_heatmap_class_colors(labels, df): - pal = sns.cubehelix_palette( - len(Counter(labels).keys()), - light=0.9, - dark=0.1, - hue=1, - reverse=True, - start=1, - rot=-2, - ) + pal = sns.cubehelix_palette(len(Counter(labels).keys()), light=0.9, dark=0.1, hue=1, reverse=True, start=1, rot=-2) lut = dict(zip(map(str, Counter(labels).keys()), pal)) colors = pd.Series(labels, index=df.index).map(lut) return colors - - + def gen_heatmap_class_dict(classes, label_colors_series): - class_color_dict_df = pd.DataFrame( - {"classes": classes, "color": label_colors_series} - ) + class_color_dict_df = pd.DataFrame({"classes": classes, "color": 
label_colors_series}) class_color_dict_df = class_color_dict_df.drop_duplicates(subset=["classes"]) - return dict(zip(class_color_dict_df["classes"], class_color_dict_df["color"])) - - + return dict(zip(class_color_dict_df["classes"],class_color_dict_df["color"])) + def make_colorbar(embs_df, label): - labels = list(embs_df[label]) + labels = list(embs_df[label]) + cell_type_colors = gen_heatmap_class_colors(labels, embs_df) label_colors = pd.DataFrame(cell_type_colors, columns=[label]) + for i,row in label_colors.iterrows(): + colors=row[0] + if len(colors)!=3 or any(np.isnan(colors)): + print(i,colors) + + label_colors.isna().sum() + # create dictionary for colors and classes label_color_dict = gen_heatmap_class_dict(labels, label_colors[label]) return label_colors, label_color_dict - - + def plot_heatmap(embs_df, emb_dims, label, output_file, kwargs_dict): sns.set_style("white") sns.set(font_scale=2) plt.figure(figsize=(15, 15), dpi=150) label_colors, label_color_dict = make_colorbar(embs_df, label) - - default_kwargs_dict = { - "row_cluster": True, - "col_cluster": True, - "row_colors": label_colors, - "standard_scale": 1, - "linewidths": 0, - "xticklabels": False, - "yticklabels": False, - "figsize": (15, 15), - "center": 0, - "cmap": "magma", - } - + + default_kwargs_dict = {"row_cluster": True, + "col_cluster": True, + "row_colors": label_colors, + "standard_scale": 1, + "linewidths": 0, + "xticklabels": False, + "yticklabels": False, + "figsize": (15,15), + "center": 0, + "cmap": "magma"} + if kwargs_dict is not None: default_kwargs_dict.update(kwargs_dict) - g = sns.clustermap( - embs_df.iloc[:, 0:emb_dims].apply(pd.to_numeric), **default_kwargs_dict - ) + g = sns.clustermap(embs_df.iloc[:,0:emb_dims].apply(pd.to_numeric), **default_kwargs_dict) plt.setp(g.ax_row_colors.get_xmajorticklabels(), rotation=45, ha="right") for label_color in list(label_color_dict.keys()): - g.ax_col_dendrogram.bar( - 0, 0, color=label_color_dict[label_color], label=label_color, 
linewidth=0 - ) + g.ax_col_dendrogram.bar(0, 0, color=label_color_dict[label_color], label=label_color, linewidth=0) - g.ax_col_dendrogram.legend( - title=f"{label}", - loc="lower center", - ncol=4, - bbox_to_anchor=(0.5, 1), - facecolor="white", - ) - plt.show() - logger.info(f"Output file: {output_file}") - plt.savefig(output_file, bbox_inches="tight") + l1 = g.ax_col_dendrogram.legend(title=f"{label}", + loc="lower center", + ncol=4, + bbox_to_anchor=(0.5, 1), + facecolor="white") + plt.savefig(output_file, bbox_inches='tight') class EmbExtractor: valid_option_dict = { - "model_type": {"Pretrained", "GeneClassifier", "CellClassifier"}, + "model_type": {"Pretrained","GeneClassifier","CellClassifier"}, "num_classes": {int}, - "emb_mode": {"cls", "cell", "gene"}, + "emb_mode": {"cell","gene"}, "cell_emb_style": {"mean_pool"}, - "gene_emb_style": {"mean_pool"}, "filter_data": {None, dict}, "max_ncells": {None, int}, "emb_layer": {-1, 0}, "emb_label": {None, list}, "labels_to_plot": {None, list}, "forward_batch_size": {int}, - "token_dictionary_file": {None, str}, "nproc": {int}, - "summary_stat": {None, "mean", "median", "exact_mean", "exact_median"}, + "summary_stat": {None, "mean", "median"}, } - def __init__( self, model_type="Pretrained", num_classes=0, - emb_mode="cls", + emb_mode="cell", cell_emb_style="mean_pool", - gene_emb_style="mean_pool", filter_data=None, max_ncells=1000, emb_layer=-1, @@ -422,442 +256,238 @@ class EmbExtractor: forward_batch_size=100, nproc=4, summary_stat=None, - token_dictionary_file=None, + token_dictionary_file=TOKEN_DICTIONARY_FILE, ): """ Initialize embedding extractor. - **Parameters:** - - model_type : {"Pretrained", "GeneClassifier", "CellClassifier"} - | Whether model is the pretrained Geneformer or a fine-tuned gene or cell classifier. + Parameters + ---------- + model_type : {"Pretrained","GeneClassifier","CellClassifier"} + Whether model is the pretrained Geneformer or a fine-tuned gene or cell classifier. 
num_classes : int - | If model is a gene or cell classifier, specify number of classes it was trained to classify. - | For the pretrained Geneformer model, number of classes is 0 as it is not a classifier. - emb_mode : {"cls", "cell", "gene"} - | Whether to output CLS, cell, or gene embeddings. - | CLS embeddings are cell embeddings derived from the CLS token in the front of the rank value encoding. - cell_emb_style : {"mean_pool"} - | Method for summarizing cell embeddings if not using CLS token. - | Currently only option is mean pooling of gene embeddings for given cell. - gene_emb_style : "mean_pool" - | Method for summarizing gene embeddings. - | Currently only option is mean pooling of contextual gene embeddings for given gene. + If model is a gene or cell classifier, specify number of classes it was trained to classify. + For the pretrained Geneformer model, number of classes is 0 as it is not a classifier. + emb_mode : {"cell","gene"} + Whether to output cell or gene embeddings. + cell_emb_style : "mean_pool" + Method for summarizing cell embeddings. + Currently only option is mean pooling of gene embeddings for given cell. filter_data : None, dict - | Default is to extract embeddings from all input data. - | Otherwise, dictionary specifying .dataset column name and list of values to filter by. + Default is to extract embeddings from all input data. + Otherwise, dictionary specifying .dataset column name and list of values to filter by. max_ncells : None, int - | Maximum number of cells to extract embeddings from. - | Default is 1000 cells randomly sampled from input data. - | If None, will extract embeddings from all cells. + Maximum number of cells to extract embeddings from. + Default is 1000 cells randomly sampled from input data. + If None, will extract embeddings from all cells. emb_layer : {-1, 0} - | Embedding layer to extract. - | The last layer is most specifically weighted to optimize the given learning objective. 
- | Generally, it is best to extract the 2nd to last layer to get a more general representation. - | -1: 2nd to last layer - | 0: last layer + Embedding layer to extract. + The last layer is most specifically weighted to optimize the given learning objective. + Generally, it is best to extract the 2nd to last layer to get a more general representation. + -1: 2nd to last layer + 0: last layer emb_label : None, list - | List of column name(s) in .dataset to add as labels to embedding output. + List of column name(s) in .dataset to add as labels to embedding output. labels_to_plot : None, list - | Cell labels to plot. - | Shown as color bar in heatmap. - | Shown as cell color in umap. - | Plotting umap requires labels to plot. + Cell labels to plot. + Shown as color bar in heatmap. + Shown as cell color in umap. + Plotting umap requires labels to plot. forward_batch_size : int - | Batch size for forward pass. + Batch size for forward pass. nproc : int - | Number of CPU processes to use. - summary_stat : {None, "mean", "median", "exact_mean", "exact_median"} - | If exact_mean or exact_median, outputs only exact mean or median embedding of input data. - | If mean or median, outputs only approximated mean or median embedding of input data. - | Non-exact recommended if encountering memory constraints while generating goal embedding positions. - | Non-exact is slower but more memory-efficient. + Number of CPU processes to use. + summary_stat : {None, "mean", "median"} + If not None, outputs only approximated mean or median embedding of input data. + Recommended if encountering memory constraints while generating goal embedding positions. + Slower but more memory-efficient. token_dictionary_file : Path - | Default is the Geneformer token dictionary - | Path to pickle file containing token dictionary (Ensembl ID:token). - - **Examples:** - - .. code-block :: python - - >>> from geneformer import EmbExtractor - >>> embex = EmbExtractor(model_type="CellClassifier", - ... 
num_classes=3, - ... emb_mode="cell", - ... filter_data={"cell_type":["cardiomyocyte"]}, - ... max_ncells=1000, - ... emb_layer=-1, - ... emb_label=["disease", "cell_type"], - ... labels_to_plot=["disease", "cell_type"]) - + Path to pickle file containing token dictionary (Ensembl ID:token). """ self.model_type = model_type self.num_classes = num_classes self.emb_mode = emb_mode self.cell_emb_style = cell_emb_style - self.gene_emb_style = gene_emb_style self.filter_data = filter_data self.max_ncells = max_ncells self.emb_layer = emb_layer self.emb_label = emb_label self.labels_to_plot = labels_to_plot - self.token_dictionary_file = token_dictionary_file self.forward_batch_size = forward_batch_size self.nproc = nproc - if (summary_stat is not None) and ("exact" in summary_stat): - self.summary_stat = None - self.exact_summary_stat = summary_stat - else: - self.summary_stat = summary_stat - self.exact_summary_stat = None + self.summary_stat = summary_stat self.validate_options() # load token dictionary (Ensembl IDs:token) - if self.token_dictionary_file is None: - token_dictionary_file = TOKEN_DICTIONARY_FILE with open(token_dictionary_file, "rb") as f: self.gene_token_dict = pickle.load(f) - self.token_gene_dict = {v: k for k, v in self.gene_token_dict.items()} self.pad_token_id = self.gene_token_dict.get("") - + + def validate_options(self): + # first disallow options under development + if self.emb_mode == "gene": + logger.error( + "Extraction and plotting of gene-level embeddings currently under development. 
" \ + "Current valid option for 'emb_mode': 'cell'" + ) + raise + # confirm arguments are within valid options and compatible with each other - for attr_name, valid_options in self.valid_option_dict.items(): + for attr_name,valid_options in self.valid_option_dict.items(): attr_value = self.__dict__[attr_name] - if not isinstance(attr_value, (list, dict)): + if type(attr_value) not in {list, dict}: if attr_value in valid_options: continue valid_type = False for option in valid_options: - if (option in [int, list, dict, bool, str]) and isinstance( - attr_value, option - ): + if (option in [int,list,dict]) and isinstance(attr_value, option): valid_type = True break if valid_type: continue logger.error( - f"Invalid option for {attr_name}. " + f"Invalid option for {attr_name}. " \ f"Valid options for {attr_name}: {valid_options}" ) raise - + if self.filter_data is not None: - for key, value in self.filter_data.items(): - if not isinstance(value, list): + for key,value in self.filter_data.items(): + if type(value) != list: self.filter_data[key] = [value] logger.warning( - "Values in filter_data dict must be lists. " - f"Changing {key} value to list ([{value}])." - ) - - def extract_embs( - self, - model_directory, - input_data_file, - output_directory, - output_prefix, - output_torch_embs=False, - cell_state=None, - ): + "Values in filter_data dict must be lists. " \ + f"Changing {key} value to list ([{value}]).") + + def extract_embs(self, + model_directory, + input_data_file, + output_directory, + output_prefix): """ Extract embeddings from input data and save as results in output_directory. 
- **Parameters:** - + Parameters + ---------- model_directory : Path - | Path to directory containing model + Path to directory containing model input_data_file : Path - | Path to directory containing .dataset inputs + Path to directory containing .dataset inputs output_directory : Path - | Path to directory where embedding data will be saved as csv + Path to directory where embedding data will be saved as csv output_prefix : str - | Prefix for output file - output_torch_embs : bool - | Whether or not to also output the embeddings as a tensor. - | Note, if true, will output embeddings as both dataframe and tensor. - cell_state : dict - | Cell state key and value for state embedding extraction. - - **Examples:** - - .. code-block :: python - - >>> embs = embex.extract_embs("path/to/model", - ... "path/to/input_data", - ... "path/to/output_directory", - ... "output_prefix") - + Prefix for output file """ - filtered_input_data = pu.load_and_filter( - self.filter_data, self.nproc, input_data_file - ) - - # Check to make sure that all the labels exist in the tokenized data: - if self.emb_label is not None: - for label in self.emb_label: - assert label in filtered_input_data.features.keys(), f"Attribute `{label}` not present in dataset features" - - if cell_state is not None: - filtered_input_data = pu.filter_by_dict( - filtered_input_data, cell_state, self.nproc - ) - downsampled_data = pu.downsample_and_sort(filtered_input_data, self.max_ncells) - model = pu.load_model( - self.model_type, self.num_classes, model_directory, mode="eval" - ) - layer_to_quant = pu.quant_layers(model) + self.emb_layer - embs = get_embs( - model=model, - filtered_input_data=downsampled_data, - emb_mode=self.emb_mode, - layer_to_quant=layer_to_quant, - pad_token_id=self.pad_token_id, - forward_batch_size=self.forward_batch_size, - token_gene_dict=self.token_gene_dict, - summary_stat=self.summary_stat, - ) - - if self.emb_mode == "cell": - if self.summary_stat is None: - embs_df = 
label_cell_embs(embs, downsampled_data, self.emb_label) - elif self.summary_stat is not None: - embs_df = pd.DataFrame(embs.cpu().numpy()).T - elif self.emb_mode == "gene": - if self.summary_stat is None: - embs_df = label_gene_embs(embs, downsampled_data, self.token_gene_dict) - elif self.summary_stat is not None: - embs_df = pd.DataFrame(embs).T - embs_df.index = [self.token_gene_dict[token] for token in embs_df.index] - elif self.emb_mode == "cls": - embs_df = label_cell_embs(embs, downsampled_data, self.emb_label) + filtered_input_data = load_and_filter(self.filter_data, self.nproc, input_data_file) + downsampled_data = downsample_and_sort(filtered_input_data, self.max_ncells) + model = load_model(self.model_type, self.num_classes, model_directory) + layer_to_quant = quant_layers(model)+self.emb_layer + embs = get_embs(model, + downsampled_data, + self.emb_mode, + layer_to_quant, + self.pad_token_id, + self.forward_batch_size, + self.summary_stat) + + if self.summary_stat is None: + embs_df = label_embs(embs, downsampled_data, self.emb_label) + elif self.summary_stat is not None: + embs_df = pd.DataFrame(embs.cpu()).T # save embeddings to output_path - if cell_state is None: - output_path = (Path(output_directory) / output_prefix).with_suffix(".csv") - embs_df.to_csv(output_path) - - if self.exact_summary_stat == "exact_mean": - embs = embs.mean(dim=0) - emb_dims = pu.get_model_emb_dims(model) - embs_df = pd.DataFrame( - embs_df[0 : emb_dims - 1].mean(axis="rows"), - columns=[self.exact_summary_stat], - ).T - elif self.exact_summary_stat == "exact_median": - embs = torch.median(embs, dim=0)[0] - emb_dims = pu.get_model_emb_dims(model) - embs_df = pd.DataFrame( - embs_df[0 : emb_dims - 1].median(axis="rows"), - columns=[self.exact_summary_stat], - ).T - - if cell_state is not None: - return embs - else: - if output_torch_embs: - return embs_df, embs - else: - return embs_df - - def get_state_embs( - self, - cell_states_to_model, - model_directory, - 
input_data_file, - output_directory, - output_prefix, - output_torch_embs=True, - ): - """ - Extract exact mean or exact median cell state embedding positions from input data and save as results in output_directory. - - **Parameters:** - - cell_states_to_model : None, dict - | Cell states to model if testing perturbations that achieve goal state change. - | Four-item dictionary with keys: state_key, start_state, goal_state, and alt_states - | state_key: key specifying name of column in .dataset that defines the start/goal states - | start_state: value in the state_key column that specifies the start state - | goal_state: value in the state_key column taht specifies the goal end state - | alt_states: list of values in the state_key column that specify the alternate end states - | For example: - | {"state_key": "disease", - | "start_state": "dcm", - | "goal_state": "nf", - | "alt_states": ["hcm", "other1", "other2"]} - model_directory : Path - | Path to directory containing model - input_data_file : Path - | Path to directory containing .dataset inputs - output_directory : Path - | Path to directory where embedding data will be saved as csv - output_prefix : str - | Prefix for output file - output_torch_embs : bool - | Whether or not to also output the embeddings as a tensor. - | Note, if true, will output embeddings as both dataframe and tensor. - - **Outputs** - - | Outputs state_embs_dict for use with in silico perturber. - | Format is dictionary of embedding positions of each cell state to model shifts from/towards. - | Keys specify each possible cell state to model. - | Values are target embedding positions as torch.tensor. 
- | For example: - | {"nf": emb_nf, - | "hcm": emb_hcm, - | "dcm": emb_dcm, - | "other1": emb_other1, - | "other2": emb_other2} - """ - - pu.validate_cell_states_to_model(cell_states_to_model) - valid_summary_stats = ["exact_mean", "exact_median"] - if self.exact_summary_stat not in valid_summary_stats: - logger.error( - "For extracting state embs, summary_stat in EmbExtractor " - f"must be set to option in {valid_summary_stats}" - ) - raise - - if self.emb_label is not None: - logger.error( - "For extracting state embs, emb_label should be None since labels are based on state embs dict keys." - ) - raise + output_path = (Path(output_directory) / output_prefix).with_suffix(".csv") + embs_df.to_csv(output_path) + + return embs_df + + def plot_embs(self, + embs, + plot_style, + output_directory, + output_prefix, + max_ncells_to_plot=1000, + kwargs_dict=None): - state_embs_dict = dict() - state_key = cell_states_to_model["state_key"] - for k, v in cell_states_to_model.items(): - if k == "state_key": - continue - elif (k == "start_state") or (k == "goal_state"): - state_embs_dict[v] = self.extract_embs( - model_directory, - input_data_file, - output_directory, - output_prefix, - output_torch_embs, - cell_state={state_key: v}, - ) - else: # k == "alt_states" - for alt_state in v: - state_embs_dict[alt_state] = self.extract_embs( - model_directory, - input_data_file, - output_directory, - output_prefix, - output_torch_embs, - cell_state={state_key: alt_state}, - ) - - output_path = (Path(output_directory) / output_prefix).with_suffix(".pkl") - with open(output_path, "wb") as fp: - pickle.dump(state_embs_dict, fp) - - return state_embs_dict - - def plot_embs( - self, - embs, - plot_style, - output_directory, - output_prefix, - max_ncells_to_plot=1000, - kwargs_dict=None, - ): """ Plot embeddings, coloring by provided labels. 
- **Parameters:** - + Parameters + ---------- embs : pandas.core.frame.DataFrame - | Pandas dataframe containing embeddings output from extract_embs + Pandas dataframe containing embeddings output from extract_embs plot_style : str - | Style of plot: "heatmap" or "umap" + Style of plot: "heatmap" or "umap" output_directory : Path - | Path to directory where plots will be saved as pdf + Path to directory where plots will be saved as pdf output_prefix : str - | Prefix for output file + Prefix for output file max_ncells_to_plot : None, int - | Maximum number of cells to plot. - | Default is 1000 cells randomly sampled from embeddings. - | If None, will plot embeddings from all cells. + Maximum number of cells to plot. + Default is 1000 cells randomly sampled from embeddings. + If None, will plot embeddings from all cells. kwargs_dict : dict - | Dictionary of kwargs to pass to plotting function. - - **Examples:** - - .. code-block :: python - - >>> embex.plot_embs(embs=embs, - ... plot_style="heatmap", - ... output_directory="path/to/output_directory", - ... output_prefix="output_prefix") - + Dictionary of kwargs to pass to plotting function. """ - - if plot_style not in ["heatmap", "umap"]: + + if plot_style not in ["heatmap","umap"]: logger.error( - "Invalid option for 'plot_style'. " "Valid options: {'heatmap','umap'}" + "Invalid option for 'plot_style'. " \ + "Valid options: {'heatmap','umap'}" ) raise - + if (plot_style == "umap") and (self.labels_to_plot is None): - logger.error("Plotting UMAP requires 'labels_to_plot'. ") + logger.error( + "Plotting UMAP requires 'labels_to_plot'. " + ) raise - - if max_ncells_to_plot is not None: - if max_ncells_to_plot > self.max_ncells: - max_ncells_to_plot = self.max_ncells - logger.warning( - "max_ncells_to_plot must be <= max_ncells. " - f"Changing max_ncells_to_plot to {self.max_ncells}." 
- ) - elif max_ncells_to_plot < self.max_ncells: - embs = embs.sample(max_ncells_to_plot, axis=0) - + + if max_ncells_to_plot > self.max_ncells: + max_ncells_to_plot = self.max_ncells + logger.warning( + "max_ncells_to_plot must be <= max_ncells. " \ + f"Changing max_ncells_to_plot to {self.max_ncells}.") + + if (max_ncells_to_plot is not None) \ + and (max_ncells_to_plot < self.max_ncells): + embs = embs.sample(max_ncells_to_plot, axis=0) + if self.emb_label is None: label_len = 0 else: label_len = len(self.emb_label) - + emb_dims = embs.shape[1] - label_len - + if self.emb_label is None: emb_labels = None else: emb_labels = embs.columns[emb_dims:] - + if plot_style == "umap": for label in self.labels_to_plot: if label not in emb_labels: logger.warning( - f"Label {label} from labels_to_plot " - f"not present in provided embeddings dataframe." - ) + f"Label {label} from labels_to_plot " \ + f"not present in provided embeddings dataframe.") continue - output_prefix_label = output_prefix + f"_umap_{label}" - output_file = ( - Path(output_directory) / output_prefix_label - ).with_suffix(".pdf") - plot_umap(embs, emb_dims, label, output_file, kwargs_dict) - + output_prefix_label = "_" + output_prefix + f"_umap_{label}" + output_file = (Path(output_directory) / output_prefix_label).with_suffix(".pdf") + plot_umap(embs, emb_dims, label, output_prefix_label, kwargs_dict) + if plot_style == "heatmap": for label in self.labels_to_plot: if label not in emb_labels: logger.warning( - f"Label {label} from labels_to_plot " - f"not present in provided embeddings dataframe." 
- ) + f"Label {label} from labels_to_plot " \ + f"not present in provided embeddings dataframe.") continue output_prefix_label = output_prefix + f"_heatmap_{label}" - output_file = ( - Path(output_directory) / output_prefix_label - ).with_suffix(".pdf") - plot_heatmap(embs, emb_dims, label, output_file, kwargs_dict) + output_file = (Path(output_directory) / output_prefix_label).with_suffix(".pdf") + plot_heatmap(embs, emb_dims, label, output_file, kwargs_dict) \ No newline at end of file diff --git a/geneformer/ensembl_mapping_dict_gc95M.pkl b/geneformer/ensembl_mapping_dict_gc95M.pkl deleted file mode 100644 index 927b80d0145a186925b04b62dac2e1141db88392..0000000000000000000000000000000000000000 --- a/geneformer/ensembl_mapping_dict_gc95M.pkl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:0819bcbd869cfa14279449b037eb9ed1d09a91310e77bd1a19d927465030e95c -size 3957652 diff --git a/geneformer/evaluation_utils.py b/geneformer/evaluation_utils.py deleted file mode 100644 index b42833785819a08d9afc1cdb84a210c46a9e94ea..0000000000000000000000000000000000000000 --- a/geneformer/evaluation_utils.py +++ /dev/null @@ -1,287 +0,0 @@ -import logging -import math -import pickle -from pathlib import Path - -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import seaborn as sns -import torch -from datasets.utils.logging import disable_progress_bar, enable_progress_bar -from sklearn import preprocessing -from sklearn.metrics import ( - ConfusionMatrixDisplay, - accuracy_score, - auc, - confusion_matrix, - f1_score, - roc_curve, -) -from tqdm.auto import trange - -from . 
import TOKEN_DICTIONARY_FILE -from .emb_extractor import make_colorbar - -logger = logging.getLogger(__name__) - - -def preprocess_classifier_batch(cell_batch, max_len, label_name): - if max_len is None: - max_len = max([len(i) for i in cell_batch["input_ids"]]) - - # load token dictionary (Ensembl IDs:token) - with open(TOKEN_DICTIONARY_FILE, "rb") as f: - gene_token_dict = pickle.load(f) - - def pad_label_example(example): - example[label_name] = np.pad( - example[label_name], - (0, max_len - len(example["input_ids"])), - mode="constant", - constant_values=-100, - ) - example["input_ids"] = np.pad( - example["input_ids"], - (0, max_len - len(example["input_ids"])), - mode="constant", - constant_values=gene_token_dict.get(""), - ) - example["attention_mask"] = ( - example["input_ids"] != gene_token_dict.get("") - ).astype(int) - return example - - padded_batch = cell_batch.map(pad_label_example) - return padded_batch - - -# Function to find the largest number smaller -# than or equal to N that is divisible by k -def find_largest_div(N, K): - rem = N % K - if rem == 0: - return N - else: - return N - rem - - -def vote(logit_list): - m = max(logit_list) - logit_list.index(m) - indices = [i for i, x in enumerate(logit_list) if x == m] - if len(indices) > 1: - return "tie" - else: - return indices[0] - - -def py_softmax(vector): - e = np.exp(vector) - return e / e.sum() - - -def classifier_predict(model, classifier_type, evalset, forward_batch_size): - if classifier_type == "gene": - label_name = "labels" - elif classifier_type == "cell": - label_name = "label" - - predict_logits = [] - predict_labels = [] - model.eval() - - # ensure there is at least 2 examples in each batch to avoid incorrect tensor dims - evalset_len = len(evalset) - max_divisible = find_largest_div(evalset_len, forward_batch_size) - if len(evalset) - max_divisible == 1: - evalset_len = max_divisible - - max_evalset_len = max(evalset.select([i for i in range(evalset_len)])["length"]) - - 
disable_progress_bar() # disable progress bar for preprocess_classifier_batch mapping - for i in trange(0, evalset_len, forward_batch_size): - max_range = min(i + forward_batch_size, evalset_len) - batch_evalset = evalset.select([i for i in range(i, max_range)]) - padded_batch = preprocess_classifier_batch( - batch_evalset, max_evalset_len, label_name - ) - padded_batch.set_format(type="torch") - - input_data_batch = padded_batch["input_ids"] - attn_msk_batch = padded_batch["attention_mask"] - label_batch = padded_batch[label_name] - with torch.no_grad(): - outputs = model( - input_ids=input_data_batch.to("cuda"), - attention_mask=attn_msk_batch.to("cuda"), - labels=label_batch.to("cuda"), - ) - predict_logits += [torch.squeeze(outputs.logits.to("cpu"))] - predict_labels += [torch.squeeze(label_batch.to("cpu"))] - - enable_progress_bar() - logits_by_cell = torch.cat(predict_logits) - last_dim = len(logits_by_cell.shape) - 1 - all_logits = logits_by_cell.reshape(-1, logits_by_cell.shape[last_dim]) - labels_by_cell = torch.cat(predict_labels) - all_labels = torch.flatten(labels_by_cell) - logit_label_paired = [ - item - for item in list(zip(all_logits.tolist(), all_labels.tolist())) - if item[1] != -100 - ] - y_pred = [vote(item[0]) for item in logit_label_paired] - y_true = [item[1] for item in logit_label_paired] - logits_list = [item[0] for item in logit_label_paired] - return y_pred, y_true, logits_list - - -def get_metrics(y_pred, y_true, logits_list, num_classes, labels): - conf_mat = confusion_matrix(y_true, y_pred, labels=list(labels)) - macro_f1 = f1_score(y_true, y_pred, average="macro") - acc = accuracy_score(y_true, y_pred) - roc_metrics = None # roc metrics not reported for multiclass - if num_classes == 2: - y_score = [py_softmax(item)[1] for item in logits_list] - fpr, tpr, _ = roc_curve(y_true, y_score) - mean_fpr = np.linspace(0, 1, 100) - interp_tpr = np.interp(mean_fpr, fpr, tpr) - interp_tpr[0] = 0.0 - tpr_wt = len(tpr) - roc_auc = auc(fpr, tpr) - 
roc_metrics = { - "fpr": fpr, - "tpr": tpr, - "interp_tpr": interp_tpr, - "auc": roc_auc, - "tpr_wt": tpr_wt, - } - return conf_mat, macro_f1, acc, roc_metrics - - -# get cross-validated mean and sd metrics -def get_cross_valid_roc_metrics(all_tpr, all_roc_auc, all_tpr_wt): - wts = [count / sum(all_tpr_wt) for count in all_tpr_wt] - all_weighted_tpr = [a * b for a, b in zip(all_tpr, wts)] - mean_tpr = np.sum(all_weighted_tpr, axis=0) - mean_tpr[-1] = 1.0 - all_weighted_roc_auc = [a * b for a, b in zip(all_roc_auc, wts)] - roc_auc = np.sum(all_weighted_roc_auc) - roc_auc_sd = math.sqrt(np.average((all_roc_auc - roc_auc) ** 2, weights=wts)) - return mean_tpr, roc_auc, roc_auc_sd - - -# plot ROC curve -def plot_ROC(roc_metric_dict, model_style_dict, title, output_dir, output_prefix): - fig = plt.figure() - fig.set_size_inches(10, 8) - sns.set(font_scale=2) - sns.set_style("white") - lw = 3 - for model_name in roc_metric_dict.keys(): - mean_fpr = roc_metric_dict[model_name]["mean_fpr"] - mean_tpr = roc_metric_dict[model_name]["mean_tpr"] - roc_auc = roc_metric_dict[model_name]["roc_auc"] - roc_auc_sd = roc_metric_dict[model_name]["roc_auc_sd"] - color = model_style_dict[model_name]["color"] - linestyle = model_style_dict[model_name]["linestyle"] - if len(roc_metric_dict[model_name]["all_roc_auc"]) > 1: - label = f"{model_name} (AUC {roc_auc:0.2f} $\pm$ {roc_auc_sd:0.2f})" - else: - label = f"{model_name} (AUC {roc_auc:0.2f})" - plt.plot( - mean_fpr, mean_tpr, color=color, linestyle=linestyle, lw=lw, label=label - ) - - plt.plot([0, 1], [0, 1], color="black", lw=lw, linestyle="--") - plt.xlim([0.0, 1.0]) - plt.ylim([0.0, 1.05]) - plt.xlabel("False Positive Rate") - plt.ylabel("True Positive Rate") - plt.title(title) - plt.legend(loc="lower right") - - output_file = (Path(output_dir) / f"{output_prefix}_roc").with_suffix(".pdf") - plt.savefig(output_file, bbox_inches="tight") - plt.show() - - -# plot confusion matrix -def plot_confusion_matrix( - conf_mat_df, title, 
output_dir, output_prefix, custom_class_order -): - fig = plt.figure() - fig.set_size_inches(10, 10) - sns.set(font_scale=1) - sns.set_style("whitegrid", {"axes.grid": False}) - if custom_class_order is not None: - conf_mat_df = conf_mat_df.reindex( - index=custom_class_order, columns=custom_class_order - ) - display_labels = generate_display_labels(conf_mat_df) - conf_mat = preprocessing.normalize(conf_mat_df.to_numpy(), norm="l1") - display = ConfusionMatrixDisplay( - confusion_matrix=conf_mat, display_labels=display_labels - ) - display.plot(cmap="Blues", values_format=".2g") - plt.title(title) - plt.show() - - output_file = (Path(output_dir) / f"{output_prefix}_conf_mat").with_suffix(".pdf") - display.figure_.savefig(output_file, bbox_inches="tight") - - -def generate_display_labels(conf_mat_df): - display_labels = [] - i = 0 - for label in conf_mat_df.index: - display_labels += [f"{label}\nn={conf_mat_df.iloc[i,:].sum():.0f}"] - i = i + 1 - return display_labels - - -def plot_predictions(predictions_df, title, output_dir, output_prefix, kwargs_dict): - sns.set(font_scale=2) - plt.figure(figsize=(10, 10), dpi=150) - label_colors, label_color_dict = make_colorbar(predictions_df, "true") - predictions_df = predictions_df.drop(columns=["true"]) - predict_colors_list = [label_color_dict[label] for label in predictions_df.columns] - predict_label_list = [label for label in predictions_df.columns] - predict_colors = pd.DataFrame( - pd.Series(predict_colors_list, index=predict_label_list), columns=["predicted"] - ) - - default_kwargs_dict = { - "row_cluster": False, - "col_cluster": False, - "row_colors": label_colors, - "col_colors": predict_colors, - "linewidths": 0, - "xticklabels": False, - "yticklabels": False, - "center": 0, - "cmap": "vlag", - } - - if kwargs_dict is not None: - default_kwargs_dict.update(kwargs_dict) - g = sns.clustermap(predictions_df, **default_kwargs_dict) - - plt.setp(g.ax_row_colors.get_xmajorticklabels(), rotation=45, ha="right") - - for 
label_color in list(label_color_dict.keys()): - g.ax_col_dendrogram.bar( - 0, 0, color=label_color_dict[label_color], label=label_color, linewidth=0 - ) - - g.ax_col_dendrogram.legend( - title=f"{title}", - loc="lower center", - ncol=4, - bbox_to_anchor=(0.5, 1), - facecolor="white", - ) - - output_file = (Path(output_dir) / f"{output_prefix}_pred").with_suffix(".pdf") - plt.savefig(output_file, bbox_inches="tight") diff --git a/geneformer/gene_dictionaries_30m/ensembl_mapping_dict_gc30M.pkl b/geneformer/gene_dictionaries_30m/ensembl_mapping_dict_gc30M.pkl deleted file mode 100644 index a3424146ccf037249ffaa23be6d9b7b8b1a97a61..0000000000000000000000000000000000000000 --- a/geneformer/gene_dictionaries_30m/ensembl_mapping_dict_gc30M.pkl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:eac0fb0b3007267871b6305ac0003ceba19d4f28d85686cb9067ecf142787869 -size 584125 diff --git a/geneformer/gene_dictionaries_30m/gene_median_dictionary_gc30M.pkl b/geneformer/gene_dictionaries_30m/gene_median_dictionary_gc30M.pkl deleted file mode 100644 index b2bda1a2d693fb4987842d068471d3cc3592686d..0000000000000000000000000000000000000000 --- a/geneformer/gene_dictionaries_30m/gene_median_dictionary_gc30M.pkl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:b3b589bb5ec75040d05fc44dd6bf0184cf87f3c362cf158d196a6ed3b7fe5f39 -size 940965 diff --git a/geneformer/gene_dictionaries_30m/token_dictionary_gc30M.pkl b/geneformer/gene_dictionaries_30m/token_dictionary_gc30M.pkl deleted file mode 100644 index 9238d4f76c3546871229f31e0794273e7fa9d2c3..0000000000000000000000000000000000000000 --- a/geneformer/gene_dictionaries_30m/token_dictionary_gc30M.pkl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ab9dc40973fa5224d77b793e2fd114cacf3d08423ed9c4c49caf0ba9c7f218f1 -size 788424 diff --git a/geneformer/gene_median_dictionary.pkl b/geneformer/gene_median_dictionary.pkl new file mode 100644 
index 0000000000000000000000000000000000000000..a0b5a900cdca5fd50aa6970e4df4465986a06873 Binary files /dev/null and b/geneformer/gene_median_dictionary.pkl differ diff --git a/geneformer/gene_median_dictionary_gc95M.pkl b/geneformer/gene_median_dictionary_gc95M.pkl deleted file mode 100644 index 76b1e84597b859f1ab323038ed7d1513c38b14e4..0000000000000000000000000000000000000000 --- a/geneformer/gene_median_dictionary_gc95M.pkl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a51c53f6a771d64508dfaf61529df70e394c53bd20856926117ae5d641a24bf5 -size 1512661 diff --git a/geneformer/gene_dictionaries_30m/gene_name_id_dict_gc30M.pkl b/geneformer/gene_name_id_dict.pkl similarity index 100% rename from geneformer/gene_dictionaries_30m/gene_name_id_dict_gc30M.pkl rename to geneformer/gene_name_id_dict.pkl diff --git a/geneformer/gene_name_id_dict_gc95M.pkl b/geneformer/gene_name_id_dict_gc95M.pkl deleted file mode 100644 index f397337d26d3eddf66cb89183047a9e38cea5988..0000000000000000000000000000000000000000 --- a/geneformer/gene_name_id_dict_gc95M.pkl +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8b0fd0521406ed18b2e341ef0acb5f53aa1a62457a07ca5840e1c142f46dd326 -size 2038812 diff --git a/geneformer/in_silico_perturber.py b/geneformer/in_silico_perturber.py index d2c6601ba67f240f3ef9f17aaf20ed14d73a2b71..b807219a442105a12684d1e37ec5f5a9853443ab 100644 --- a/geneformer/in_silico_perturber.py +++ b/geneformer/in_silico_perturber.py @@ -1,82 +1,615 @@ """ Geneformer in silico perturber. -**Usage:** - -.. code-block :: python - - >>> from geneformer import InSilicoPerturber - >>> isp = InSilicoPerturber(perturb_type="delete", - ... perturb_rank_shift=None, - ... genes_to_perturb="all", - ... model_type="CellClassifier", - ... num_classes=0, - ... emb_mode="cell", - ... filter_data={"cell_type":["cardiomyocyte"]}, - ... 
cell_states_to_model={"state_key": "disease", "start_state": "dcm", "goal_state": "nf", "alt_states": ["hcm", "other1", "other2"]}, - ... state_embs_dict ={"nf": emb_nf, "hcm": emb_hcm, "dcm": emb_dcm, "other1": emb_other1, "other2": emb_other2}, - ... max_ncells=None, - ... emb_layer=0, - ... forward_batch_size=100, - ... nproc=16) - >>> isp.perturb_data("path/to/model", - ... "path/to/input_data", - ... "path/to/output_directory", - ... "output_prefix") - -**Description:** - -| Performs in silico perturbation (e.g. deletion or overexpression) of defined set of genes or all genes in sample of cells. -| Outputs impact of perturbation on cell or gene embeddings. -| Output files are analyzed with ``in_silico_perturber_stats``. - +Usage: + from geneformer import InSilicoPerturber + isp = InSilicoPerturber(perturb_type="delete", + perturb_rank_shift=None, + genes_to_perturb="all", + combos=0, + anchor_gene=None, + model_type="Pretrained", + num_classes=0, + emb_mode="cell", + cell_emb_style="mean_pool", + filter_data={"cell_type":["cardiomyocyte"]}, + cell_states_to_model={"state_key": "disease", "start_state": "dcm", "goal_state": "nf", "alt_states": ["hcm", "other1", "other2"]}, + max_ncells=None, + emb_layer=-1, + forward_batch_size=100, + nproc=4) + isp.perturb_data("path/to/model", + "path/to/input_data", + "path/to/output_directory", + "output_prefix") """ -import logging - # imports -import os +import itertools as it +import logging +import numpy as np import pickle +import re +import seaborn as sns; sns.set() +import torch from collections import defaultdict +from datasets import Dataset, load_from_disk +from tqdm.notebook import trange +from transformers import BertForMaskedLM, BertForTokenClassification, BertForSequenceClassification -import torch -from datasets import Dataset -from multiprocess import set_start_method -from tqdm.auto import trange +from .tokenizer import TOKEN_DICTIONARY_FILE -from . import TOKEN_DICTIONARY_FILE -from . 
import perturber_utils as pu -from .emb_extractor import get_embs +logger = logging.getLogger(__name__) -import datasets -datasets.logging.disable_progress_bar() +# load data and filter by defined criteria +def load_and_filter(filter_data, nproc, input_data_file): + data = load_from_disk(input_data_file) + if filter_data is not None: + for key,value in filter_data.items(): + def filter_data_by_criteria(example): + return example[key] in value + data = data.filter(filter_data_by_criteria, num_proc=nproc) + if len(data) == 0: + logger.error( + "No cells remain after filtering. Check filtering criteria.") + raise + data_shuffled = data.shuffle(seed=42) + return data_shuffled + +# load model to GPU +def load_model(model_type, num_classes, model_directory): + if model_type == "Pretrained": + model = BertForMaskedLM.from_pretrained(model_directory, + output_hidden_states=True, + output_attentions=False) + elif model_type == "GeneClassifier": + model = BertForTokenClassification.from_pretrained(model_directory, + num_labels=num_classes, + output_hidden_states=True, + output_attentions=False) + elif model_type == "CellClassifier": + model = BertForSequenceClassification.from_pretrained(model_directory, + num_labels=num_classes, + output_hidden_states=True, + output_attentions=False) + # put the model in eval mode for fwd pass + model.eval() + model = model.to("cuda:0") + return model + +def quant_layers(model): + layer_nums = [] + for name, parameter in model.named_parameters(): + if "layer" in name: + layer_nums += [int(name.split("layer.")[1].split(".")[0])] + return int(max(layer_nums))+1 + +def get_model_input_size(model): + return int(re.split("\(|,",str(model.bert.embeddings.position_embeddings))[1]) + +def flatten_list(megalist): + return [item for sublist in megalist for item in sublist] + +def measure_length(example): + example["length"] = len(example["input_ids"]) + return example + +def downsample_and_sort(data_shuffled, max_ncells): + num_cells = 
len(data_shuffled) + # if max number of cells is defined, then subsample to this max number + if max_ncells != None: + num_cells = min(max_ncells,num_cells) + data_subset = data_shuffled.select([i for i in range(num_cells)]) + # sort dataset with largest cell first to encounter any memory errors earlier + data_sorted = data_subset.sort("length",reverse=True) + return data_sorted + +def get_possible_states(cell_states_to_model): + possible_states = [] + for key in ["start_state","goal_state"]: + possible_states += [cell_states_to_model[key]] + possible_states += cell_states_to_model.get("alt_states",[]) + return possible_states + +def forward_pass_single_cell(model, example_cell, layer_to_quant): + example_cell.set_format(type="torch") + input_data = example_cell["input_ids"] + with torch.no_grad(): + outputs = model( + input_ids = input_data.to("cuda") + ) + emb = torch.squeeze(outputs.hidden_states[layer_to_quant]) + del outputs + return emb + +def perturb_emb_by_index(emb, indices): + mask = torch.ones(emb.numel(), dtype=torch.bool) + mask[indices] = False + return emb[mask] + +def delete_indices(example): + indices = example["perturb_index"] + if any(isinstance(el, list) for el in indices): + indices = flatten_list(indices) + for index in sorted(indices, reverse=True): + del example["input_ids"][index] + return example + +# for genes_to_perturb = "all" where only genes within cell are overexpressed +def overexpress_indices(example): + indices = example["perturb_index"] + if any(isinstance(el, list) for el in indices): + indices = flatten_list(indices) + for index in sorted(indices, reverse=True): + example["input_ids"].insert(0, example["input_ids"].pop(index)) + return example + +# for genes_to_perturb = list of genes to overexpress that are not necessarily expressed in cell +def overexpress_tokens(example): + # -100 indicates tokens to overexpress are not present in rank value encoding + if example["perturb_index"] != [-100]: + example = 
delete_indices(example) + [example["input_ids"].insert(0, token) for token in example["tokens_to_perturb"][::-1]] + return example + +def remove_indices_from_emb(emb, indices_to_remove, gene_dim): + # indices_to_remove is list of indices to remove + indices_to_keep = [i for i in range(emb.size()[gene_dim]) if i not in indices_to_remove] + num_dims = emb.dim() + emb_slice = [slice(None) if dim != gene_dim else indices_to_keep for dim in range(num_dims)] + sliced_emb = emb[emb_slice] + return sliced_emb + +def remove_indices_from_emb_batch(emb_batch, list_of_indices_to_remove, gene_dim): + output_batch = torch.stack([ + remove_indices_from_emb(emb_batch[i, :, :], idx, gene_dim-1) for + i, idx in enumerate(list_of_indices_to_remove) + ]) + return output_batch + +def make_perturbation_batch(example_cell, + perturb_type, + tokens_to_perturb, + anchor_token, + combo_lvl, + num_proc): + if tokens_to_perturb == "all": + if perturb_type in ["overexpress","activate"]: + range_start = 1 + elif perturb_type in ["delete","inhibit"]: + range_start = 0 + indices_to_perturb = [[i] for i in range(range_start,example_cell["length"][0])] + elif combo_lvl>0 and (anchor_token is not None): + example_input_ids = example_cell["input_ids "][0] + anchor_index = example_input_ids.index(anchor_token[0]) + indices_to_perturb = [sorted([anchor_index,i]) if i!=anchor_index else None for i in range(example_cell["length"][0])] + indices_to_perturb = [item for item in indices_to_perturb if item is not None] + else: + example_input_ids = example_cell["input_ids"][0] + indices_to_perturb = [[example_input_ids.index(token)] if token in example_input_ids else None for token in tokens_to_perturb] + indices_to_perturb = [item for item in indices_to_perturb if item is not None] + + # create all permutations of combo_lvl of modifiers from tokens_to_perturb + if combo_lvl>0 and (anchor_token is None): + if tokens_to_perturb != "all": + if len(tokens_to_perturb) == combo_lvl+1: + indices_to_perturb = 
[list(x) for x in it.combinations(indices_to_perturb, combo_lvl+1)] + else: + all_indices = [[i] for i in range(example_cell["length"][0])] + all_indices = [index for index in all_indices if index not in indices_to_perturb] + indices_to_perturb = [[[j for i in indices_to_perturb for j in i], x] for x in all_indices] + length = len(indices_to_perturb) + perturbation_dataset = Dataset.from_dict({"input_ids": example_cell["input_ids"]*length, + "perturb_index": indices_to_perturb}) + if length<400: + num_proc_i = 1 + else: + num_proc_i = num_proc + if perturb_type == "delete": + perturbation_dataset = perturbation_dataset.map(delete_indices, num_proc=num_proc_i) + elif perturb_type == "overexpress": + perturbation_dataset = perturbation_dataset.map(overexpress_indices, num_proc=num_proc_i) + return perturbation_dataset, indices_to_perturb + +# perturbed cell emb removing the activated/overexpressed/inhibited gene emb +# so that only non-perturbed gene embeddings are compared to each other +# in original or perturbed context +def make_comparison_batch(original_emb_batch, indices_to_perturb, perturb_group): + all_embs_list = [] + + # if making comparison batch for multiple perturbations in single cell + if perturb_group == False: + original_emb_list = [original_emb_batch]*len(indices_to_perturb) + # if making comparison batch for single perturbation in multiple cells + elif perturb_group == True: + original_emb_list = original_emb_batch + + + for i in range(len(original_emb_list)): + original_emb = original_emb_list[i] + indices = indices_to_perturb[i] + if indices == [-100]: + all_embs_list += [original_emb[:]] + continue + emb_list = [] + start = 0 + if any(isinstance(el, list) for el in indices): + indices = flatten_list(indices) + for i in sorted(indices): + emb_list += [original_emb[start:i]] + start = i+1 + emb_list += [original_emb[start:]] + all_embs_list += [torch.cat(emb_list)] + len_set = set([emb.size()[0] for emb in all_embs_list]) + if len(len_set) > 1: + 
max_len = max(len_set) + all_embs_list = [pad_2d_tensor(emb, None, max_len, 0) for emb in all_embs_list] + return torch.stack(all_embs_list) + +# average embedding position of goal cell states +def get_cell_state_avg_embs(model, + filtered_input_data, + cell_states_to_model, + layer_to_quant, + pad_token_id, + forward_batch_size, + num_proc): + + model_input_size = get_model_input_size(model) + possible_states = get_possible_states(cell_states_to_model) + state_embs_dict = dict() + for possible_state in possible_states: + state_embs_list = [] + original_lens = [] + + def filter_states(example): + state_key = cell_states_to_model["state_key"] + return example[state_key] in [possible_state] + filtered_input_data_state = filtered_input_data.filter(filter_states, num_proc=num_proc) + total_batch_length = len(filtered_input_data_state) + if ((total_batch_length-1)/forward_batch_size).is_integer(): + forward_batch_size = forward_batch_size-1 + max_len = max(filtered_input_data_state["length"]) + for i in range(0, total_batch_length, forward_batch_size): + max_range = min(i+forward_batch_size, total_batch_length) + + state_minibatch = filtered_input_data_state.select([i for i in range(i, max_range)]) + state_minibatch.set_format(type="torch") + + input_data_minibatch = state_minibatch["input_ids"] + original_lens += state_minibatch["length"] + input_data_minibatch = pad_tensor_list(input_data_minibatch, + max_len, + pad_token_id, + model_input_size) + attention_mask = gen_attention_mask(state_minibatch, max_len) + + with torch.no_grad(): + outputs = model( + input_ids = input_data_minibatch.to("cuda"), + attention_mask = attention_mask + ) + + state_embs_i = outputs.hidden_states[layer_to_quant] + state_embs_list += [state_embs_i] + del outputs + del state_minibatch + del input_data_minibatch + del attention_mask + del state_embs_i + torch.cuda.empty_cache() -logger = logging.getLogger(__name__) + state_embs = torch.cat(state_embs_list) + avg_state_emb = 
mean_nonpadding_embs(state_embs, torch.Tensor(original_lens).to("cuda")) + avg_state_emb = torch.mean(avg_state_emb, dim=0, keepdim=True) + state_embs_dict[possible_state] = avg_state_emb + return state_embs_dict + +# quantify cosine similarity of perturbed vs original or alternate states +def quant_cos_sims(model, + perturb_type, + perturbation_batch, + forward_batch_size, + layer_to_quant, + original_emb, + tokens_to_perturb, + indices_to_perturb, + perturb_group, + cell_states_to_model, + state_embs_dict, + pad_token_id, + model_input_size, + nproc): + cos = torch.nn.CosineSimilarity(dim=2) + total_batch_length = len(perturbation_batch) + if ((total_batch_length-1)/forward_batch_size).is_integer(): + forward_batch_size = forward_batch_size-1 + if cell_states_to_model is None: + if perturb_group == False: # (if perturb_group is True, original_emb is filtered_input_data) + comparison_batch = make_comparison_batch(original_emb, indices_to_perturb, perturb_group) + cos_sims = [] + else: + possible_states = get_possible_states(cell_states_to_model) + cos_sims_vs_alt_dict = dict(zip(possible_states,[[] for i in range(len(possible_states))])) + + # measure length of each element in perturbation_batch + perturbation_batch = perturbation_batch.map( + measure_length, num_proc=nproc + ) + + for i in range(0, total_batch_length, forward_batch_size): + max_range = min(i+forward_batch_size, total_batch_length) + + perturbation_minibatch = perturbation_batch.select([i for i in range(i, max_range)]) + # determine if need to pad or truncate batch + minibatch_length_set = set(perturbation_minibatch["length"]) + minibatch_lengths = perturbation_minibatch["length"] + if (len(minibatch_length_set) > 1) or (max(minibatch_length_set) > model_input_size): + needs_pad_or_trunc = True + else: + needs_pad_or_trunc = False + max_len = max(minibatch_length_set) + + if needs_pad_or_trunc == True: + max_len = min(max(minibatch_length_set),model_input_size) + def pad_or_trunc_example(example): 
+ example["input_ids"] = pad_or_truncate_encoding(example["input_ids"], + pad_token_id, + max_len) + return example + perturbation_minibatch = perturbation_minibatch.map(pad_or_trunc_example, num_proc=nproc) + + perturbation_minibatch.set_format(type="torch") + + input_data_minibatch = perturbation_minibatch["input_ids"] + attention_mask = gen_attention_mask(perturbation_minibatch, max_len) + + # extract embeddings for perturbation minibatch + with torch.no_grad(): + outputs = model( + input_ids = input_data_minibatch.to("cuda"), + attention_mask = attention_mask + ) + del input_data_minibatch + del perturbation_minibatch + del attention_mask + + if len(indices_to_perturb)>1: + minibatch_emb = torch.squeeze(outputs.hidden_states[layer_to_quant]) + else: + minibatch_emb = outputs.hidden_states[layer_to_quant] + + if perturb_type == "overexpress": + # remove overexpressed genes to quantify effect on remaining genes + if perturb_group == False: + overexpressed_to_remove = 1 + if perturb_group == True: + overexpressed_to_remove = len(tokens_to_perturb) + minibatch_emb = minibatch_emb[:,overexpressed_to_remove:,:] + + # if quantifying single perturbation in multiple different cells, pad original batch and extract embs + if perturb_group == True: + # pad minibatch of original batch to extract embeddings + # truncate to the (model input size - # tokens to overexpress) to ensure comparability + # since max input size of perturb batch will be reduced by # tokens to overexpress + original_minibatch = original_emb.select([i for i in range(i, max_range)]) + original_minibatch_lengths = original_minibatch["length"] + original_minibatch_length_set = set(original_minibatch["length"]) + + indices_to_perturb_minibatch = indices_to_perturb[i:i+forward_batch_size] + + if perturb_type == "overexpress": + new_max_len = model_input_size - len(tokens_to_perturb) + else: + new_max_len = model_input_size + if (len(original_minibatch_length_set) > 1) or (max(original_minibatch_length_set) > 
new_max_len): + new_max_len = min(max(original_minibatch_length_set),new_max_len) + def pad_or_trunc_example(example): + example["input_ids"] = pad_or_truncate_encoding(example["input_ids"], pad_token_id, new_max_len) + return example + original_minibatch = original_minibatch.map(pad_or_trunc_example, num_proc=nproc) + original_minibatch.set_format(type="torch") + original_input_data_minibatch = original_minibatch["input_ids"] + attention_mask = gen_attention_mask(original_minibatch, new_max_len) + # extract embeddings for original minibatch + with torch.no_grad(): + original_outputs = model( + input_ids = original_input_data_minibatch.to("cuda"), + attention_mask = attention_mask + ) + del original_input_data_minibatch + del original_minibatch + del attention_mask + + if len(indices_to_perturb)>1: + original_minibatch_emb = torch.squeeze(original_outputs.hidden_states[layer_to_quant]) + else: + original_minibatch_emb = original_outputs.hidden_states[layer_to_quant] + + # embedding dimension of the genes + gene_dim = 1 + # exclude overexpression due to case when genes are not expressed but being overexpressed + if perturb_type != "overexpress": + original_minibatch_emb = remove_indices_from_emb_batch(original_minibatch_emb, + indices_to_perturb_minibatch, + gene_dim) + + # cosine similarity between original emb and batch items + if cell_states_to_model is None: + if perturb_group == False: + minibatch_comparison = comparison_batch[i:max_range] + elif perturb_group == True: + minibatch_comparison = original_minibatch_emb + + cos_sims += [cos(minibatch_emb, minibatch_comparison).to("cpu")] + elif cell_states_to_model is not None: + for state in possible_states: + if perturb_group == False: + cos_sims_vs_alt_dict[state] += cos_sim_shift(original_emb, + minibatch_emb, + state_embs_dict[state], + perturb_group) + elif perturb_group == True: + cos_sims_vs_alt_dict[state] += cos_sim_shift(original_minibatch_emb, + minibatch_emb, + state_embs_dict[state], + perturb_group, 
+ torch.tensor(original_minibatch_lengths, device="cuda"), + torch.tensor(minibatch_lengths, device="cuda")) + del outputs + del minibatch_emb + if cell_states_to_model is None: + del minibatch_comparison + torch.cuda.empty_cache() + if cell_states_to_model is None: + cos_sims_stack = torch.cat(cos_sims) + return cos_sims_stack + else: + for state in possible_states: + cos_sims_vs_alt_dict[state] = torch.cat(cos_sims_vs_alt_dict[state]) + return cos_sims_vs_alt_dict + +# calculate cos sim shift of perturbation with respect to origin and alternative cell +def cos_sim_shift(original_emb, + minibatch_emb, + end_emb, + perturb_group, + original_minibatch_lengths = None, + minibatch_lengths = None): + cos = torch.nn.CosineSimilarity(dim=2) + if not perturb_group: + original_emb = torch.mean(original_emb,dim=0,keepdim=True) + original_emb = original_emb[None, :] + origin_v_end = torch.squeeze(cos(original_emb, end_emb)) #test + else: + if original_emb.size() != minibatch_emb.size(): + logger.error( + f"Embeddings are not the same dimensions. " \ + f"original_emb is {original_emb.size()}. " \ + f"minibatch_emb is {minibatch_emb.size()}. 
" + ) + raise + if original_minibatch_lengths is not None: + original_emb = mean_nonpadding_embs(original_emb, original_minibatch_lengths) + # else: + # original_emb = torch.mean(original_emb,dim=1,keepdim=True) + + end_emb = torch.unsqueeze(end_emb, 1) + origin_v_end = cos(original_emb, end_emb) + origin_v_end = torch.squeeze(origin_v_end) + if minibatch_lengths is not None: + perturb_emb = mean_nonpadding_embs(minibatch_emb, minibatch_lengths) + else: + perturb_emb = torch.mean(minibatch_emb,dim=1,keepdim=True) + + perturb_v_end = cos(perturb_emb, end_emb) + perturb_v_end = torch.squeeze(perturb_v_end) + return [(perturb_v_end-origin_v_end).to("cpu")] + +def pad_list(input_ids, pad_token_id, max_len): + input_ids = np.pad(input_ids, + (0, max_len-len(input_ids)), + mode='constant', constant_values=pad_token_id) + return input_ids + +def pad_tensor(tensor, pad_token_id, max_len): + tensor = torch.nn.functional.pad(tensor, pad=(0, + max_len - tensor.numel()), + mode='constant', + value=pad_token_id) + return tensor + +def pad_2d_tensor(tensor, pad_token_id, max_len, dim): + if dim == 0: + pad = (0, 0, 0, max_len - tensor.size()[dim]) + elif dim == 1: + pad = (0, max_len - tensor.size()[dim], 0, 0) + tensor = torch.nn.functional.pad(tensor, pad=pad, + mode='constant', + value=pad_token_id) + return tensor + +def pad_or_truncate_encoding(encoding, pad_token_id, max_len): + if isinstance(encoding, torch.Tensor): + encoding_len = tensor.size()[0] + elif isinstance(encoding, list): + encoding_len = len(encoding) + if encoding_len > max_len: + encoding = encoding[0:max_len] + elif encoding_len < max_len: + if isinstance(encoding, torch.Tensor): + encoding = pad_tensor(encoding, pad_token_id, max_len) + elif isinstance(encoding, list): + encoding = pad_list(encoding, pad_token_id, max_len) + return encoding + +# pad list of tensors and convert to tensor +def pad_tensor_list(tensor_list, dynamic_or_constant, pad_token_id, model_input_size): + + # Determine maximum tensor 
length + if dynamic_or_constant == "dynamic": + max_len = max([tensor.squeeze().numel() for tensor in tensor_list]) + elif type(dynamic_or_constant) == int: + max_len = dynamic_or_constant + else: + max_len = model_input_size + logger.warning( + "If padding style is constant, must provide integer value. " \ + f"Setting padding to max input size {model_input_size}.") + + # pad all tensors to maximum length + tensor_list = [pad_tensor(tensor, pad_token_id, max_len) for tensor in tensor_list] + + # return stacked tensors + return torch.stack(tensor_list) + +def gen_attention_mask(minibatch_encoding, max_len = None): + if max_len == None: + max_len = max(minibatch_encoding["length"]) + original_lens = minibatch_encoding["length"] + attention_mask = [[1]*original_len + +[0]*(max_len - original_len) + if original_len <= max_len + else [1]*max_len + for original_len in original_lens] + return torch.tensor(attention_mask).to("cuda") + +# get cell embeddings excluding padding +def mean_nonpadding_embs(embs, original_lens): + # mask based on padding lengths + mask = torch.arange(embs.size(1)).unsqueeze(0).to("cuda") < original_lens.unsqueeze(1) + + # extend mask dimensions to match the embeddings tensor + mask = mask.unsqueeze(2).expand_as(embs) + + # use the mask to zero out the embeddings in padded areas + masked_embs = embs * mask.float() + + # sum and divide by the lengths to get the mean of non-padding embs + mean_embs = masked_embs.sum(1) / original_lens.view(-1, 1).float() + return mean_embs class InSilicoPerturber: valid_option_dict = { - "perturb_type": {"delete", "overexpress", "inhibit", "activate"}, + "perturb_type": {"delete","overexpress","inhibit","activate"}, "perturb_rank_shift": {None, 1, 2, 3}, "genes_to_perturb": {"all", list}, "combos": {0, 1}, "anchor_gene": {None, str}, - "model_type": {"Pretrained", "GeneClassifier", "CellClassifier", "MTLCellClassifier", "MTLCellClassifier-Quantized"}, + "model_type": {"Pretrained","GeneClassifier","CellClassifier"}, 
"num_classes": {int}, - "emb_mode": {"cls", "cell", "cls_and_gene", "cell_and_gene"}, + "emb_mode": {"cell","cell_and_gene"}, "cell_emb_style": {"mean_pool"}, "filter_data": {None, dict}, "cell_states_to_model": {None, dict}, - "state_embs_dict": {None, dict}, "max_ncells": {None, int}, "cell_inds_to_perturb": {"all", dict}, "emb_layer": {-1, 0}, - "token_dictionary_file": {None, str}, "forward_batch_size": {int}, "nproc": {int}, } - def __init__( self, perturb_type="delete", @@ -86,113 +619,95 @@ class InSilicoPerturber: anchor_gene=None, model_type="Pretrained", num_classes=0, - emb_mode="cls", + emb_mode="cell", cell_emb_style="mean_pool", filter_data=None, cell_states_to_model=None, - state_embs_dict=None, max_ncells=None, cell_inds_to_perturb="all", emb_layer=-1, forward_batch_size=100, nproc=4, - token_dictionary_file=None, - clear_mem_ncells=1000, + token_dictionary_file=TOKEN_DICTIONARY_FILE, ): """ Initialize in silico perturber. - **Parameters:** - - perturb_type : {"delete", "overexpress", "inhibit", "activate"} - | Type of perturbation. - | "delete": delete gene from rank value encoding - | "overexpress": move gene to front of rank value encoding - | *(TBA)* "inhibit": move gene to lower quartile of rank value encoding - | *(TBA)* "activate": move gene to higher quartile of rank value encoding - *(TBA)* perturb_rank_shift : None, {1,2,3} - | Number of quartiles by which to shift rank of gene. - | For example, if perturb_type="activate" and perturb_rank_shift=1: - | genes in 4th quartile will move to middle of 3rd quartile. - | genes in 3rd quartile will move to middle of 2nd quartile. - | genes in 2nd quartile will move to middle of 1st quartile. - | genes in 1st quartile will move to front of rank value encoding. - | For example, if perturb_type="inhibit" and perturb_rank_shift=2: - | genes in 1st quartile will move to middle of 3rd quartile. - | genes in 2nd quartile will move to middle of 4th quartile. 
- | genes in 3rd or 4th quartile will move to bottom of rank value encoding. + Parameters + ---------- + perturb_type : {"delete","overexpress","inhibit","activate"} + Type of perturbation. + "delete": delete gene from rank value encoding + "overexpress": move gene to front of rank value encoding + "inhibit": move gene to lower quartile of rank value encoding + "activate": move gene to higher quartile of rank value encoding + perturb_rank_shift : None, {1,2,3} + Number of quartiles by which to shift rank of gene. + For example, if perturb_type="activate" and perturb_rank_shift=1: + genes in 4th quartile will move to middle of 3rd quartile. + genes in 3rd quartile will move to middle of 2nd quartile. + genes in 2nd quartile will move to middle of 1st quartile. + genes in 1st quartile will move to front of rank value encoding. + For example, if perturb_type="inhibit" and perturb_rank_shift=2: + genes in 1st quartile will move to middle of 3rd quartile. + genes in 2nd quartile will move to middle of 4th quartile. + genes in 3rd or 4th quartile will move to bottom of rank value encoding. genes_to_perturb : "all", list - | Default is perturbing each gene detected in each cell in the dataset. - | Otherwise, may provide a list of ENSEMBL IDs of genes to perturb. - | If gene list is provided, then perturber will only test perturbing them all together - | (rather than testing each possible combination of the provided genes). + Default is perturbing each gene detected in each cell in the dataset. + Otherwise, may provide a list of ENSEMBL IDs of genes to perturb. + If gene list is provided, then perturber will only test perturbing them all together + (rather than testing each possible combination of the provided genes). combos : {0,1} - | Whether to perturb genes individually (0) or in pairs (1). + Whether to perturb genes individually (0) or in pairs (1). anchor_gene : None, str - | ENSEMBL ID of gene to use as anchor in combination perturbations. 
- | For example, if combos=1 and anchor_gene="ENSG00000148400": - | anchor gene will be perturbed in combination with each other gene. - model_type : {"Pretrained", "GeneClassifier", "CellClassifier", "MTLCellClassifier", "MTLCellClassifier-Quantized"} - | Whether model is the pretrained Geneformer or a fine-tuned gene, cell, or multitask cell classifier (+/- 8bit quantization). + ENSEMBL ID of gene to use as anchor in combination perturbations. + For example, if combos=1 and anchor_gene="ENSG00000148400": + anchor gene will be perturbed in combination with each other gene. + model_type : {"Pretrained","GeneClassifier","CellClassifier"} + Whether model is the pretrained Geneformer or a fine-tuned gene or cell classifier. num_classes : int - | If model is a gene or cell classifier, specify number of classes it was trained to classify. - | For the pretrained Geneformer model, number of classes is 0 as it is not a classifier. - emb_mode : {"cls", "cell", "cls_and_gene","cell_and_gene"} - | Whether to output impact of perturbation on CLS token, cell, and/or gene embeddings. - | Gene embedding shifts only available as compared to original cell, not comparing to goal state. + If model is a gene or cell classifier, specify number of classes it was trained to classify. + For the pretrained Geneformer model, number of classes is 0 as it is not a classifier. + emb_mode : {"cell","cell_and_gene"} + Whether to output impact of perturbation on cell and/or gene embeddings. cell_emb_style : "mean_pool" - | Method for summarizing cell embeddings if not using CLS token. - | Currently only option is mean pooling of gene embeddings for given cell. + Method for summarizing cell embeddings. + Currently only option is mean pooling of gene embeddings for given cell. filter_data : None, dict - | Default is to use all input data for in silico perturbation study. - | Otherwise, dictionary specifying .dataset column name and list of values to filter by. 
- cell_states_to_model : None, dict - | Cell states to model if testing perturbations that achieve goal state change. - | Four-item dictionary with keys: state_key, start_state, goal_state, and alt_states - | state_key: key specifying name of column in .dataset that defines the start/goal states - | start_state: value in the state_key column that specifies the start state - | goal_state: value in the state_key column taht specifies the goal end state - | alt_states: list of values in the state_key column that specify the alternate end states - | For example: {"state_key": "disease", - | "start_state": "dcm", - | "goal_state": "nf", - | "alt_states": ["hcm", "other1", "other2"]} - state_embs_dict : None, dict - | Embedding positions of each cell state to model shifts from/towards (e.g. mean or median). - | Dictionary with keys specifying each possible cell state to model. - | Values are target embedding positions as torch.tensor. - | For example: {"nf": emb_nf, - | "hcm": emb_hcm, - | "dcm": emb_dcm, - | "other1": emb_other1, - | "other2": emb_other2} + Default is to use all input data for in silico perturbation study. + Otherwise, dictionary specifying .dataset column name and list of values to filter by. + cell_states_to_model: None, dict + Cell states to model if testing perturbations that achieve goal state change. + Four-item dictionary with keys: state_key, start_state, goal_state, and alt_states + state_key: key specifying name of column in .dataset that defines the start/goal states + start_state: value in the state_key column that specifies the start state + goal_state: value in the state_key column taht specifies the goal end state + alt_states: list of values in the state_key column that specify the alternate end states + For example: {"state_key": "disease", + "start_state": "dcm", + "goal_state": "nf", + "alt_states": ["hcm", "other1", "other2"]} max_ncells : None, int - | Maximum number of cells to test. - | If None, will test all cells. 
+ Maximum number of cells to test. + If None, will test all cells. cell_inds_to_perturb : "all", list - | Default is perturbing each cell in the dataset. - | Otherwise, may provide a dict of indices of cells to perturb with keys start_ind and end_ind. - | start_ind: the first index to perturb. - | end_ind: the last index to perturb (exclusive). - | Indices will be selected *after* the filter_data criteria and sorting. - | Useful for splitting extremely large datasets across separate GPUs. + Default is perturbing each cell in the dataset. + Otherwise, may provide a dict of indices of cells to perturb with keys start_ind and end_ind. + start_ind: the first index to perturb. + end_ind: the last index to perturb (exclusive). + Indices will be selected *after* the filter_data criteria and sorting. + Useful for splitting extremely large datasets across separate GPUs. emb_layer : {-1, 0} - | Embedding layer to use for quantification. - | 0: last layer (recommended for questions closely tied to model's training objective) - | -1: 2nd to last layer (recommended for questions requiring more general representations) + Embedding layer to use for quantification. + -1: 2nd to last layer (recommended for pretrained Geneformer) + 0: last layer (recommended for cell classifier fine-tuned for disease state) forward_batch_size : int - | Batch size for forward pass. + Batch size for forward pass. nproc : int - | Number of CPU processes to use. + Number of CPU processes to use. token_dictionary_file : Path - | Path to pickle file containing token dictionary (Ensembl ID:token). - clear_mem_ncells : int - | Clear memory every n cells. + Path to pickle file containing token dictionary (Ensembl ID:token). 
""" - try: - set_start_method("spawn") - except RuntimeError: - pass self.perturb_type = perturb_type self.perturb_rank_shift = perturb_rank_shift @@ -200,56 +715,36 @@ class InSilicoPerturber: self.combos = combos self.anchor_gene = anchor_gene if self.genes_to_perturb == "all": - self.perturb_group = False + self.perturb_group = False else: self.perturb_group = True - if (self.anchor_gene is not None) or (self.combos != 0): + if (self.anchor_gene != None) or (self.combos != 0): self.anchor_gene = None self.combos = 0 logger.warning( - "anchor_gene set to None and combos set to 0. " - "If providing list of genes to perturb, " - "list of genes_to_perturb will be perturbed together, " - "without anchor gene or combinations." - ) + "anchor_gene set to None and combos set to 0. " \ + "If providing list of genes to perturb, " \ + "list of genes_to_perturb will be perturbed together, "\ + "without anchor gene or combinations.") self.model_type = model_type self.num_classes = num_classes self.emb_mode = emb_mode self.cell_emb_style = cell_emb_style self.filter_data = filter_data self.cell_states_to_model = cell_states_to_model - self.state_embs_dict = state_embs_dict self.max_ncells = max_ncells self.cell_inds_to_perturb = cell_inds_to_perturb self.emb_layer = emb_layer self.forward_batch_size = forward_batch_size self.nproc = nproc - self.token_dictionary_file = token_dictionary_file - self.clear_mem_ncells = clear_mem_ncells self.validate_options() # load token dictionary (Ensembl IDs:token) - if self.token_dictionary_file is None: - token_dictionary_file = TOKEN_DICTIONARY_FILE with open(token_dictionary_file, "rb") as f: self.gene_token_dict = pickle.load(f) - self.token_gene_dict = {v: k for k, v in self.gene_token_dict.items()} self.pad_token_id = self.gene_token_dict.get("") - self.cls_token_id = self.gene_token_dict.get("") - self.eos_token_id = self.gene_token_dict.get("") - - # Identify if special token is present in the token dictionary - if (self.cls_token_id 
is not None) and (self.eos_token_id is not None): - self.special_token = True - else: - if "cls" in self.emb_mode: - logger.error( - f"emb_mode set to {self.emb_mode} but or token not in token dictionary." - ) - raise - self.special_token = False if self.anchor_gene is None: self.anchor_token = None @@ -257,47 +752,36 @@ class InSilicoPerturber: try: self.anchor_token = [self.gene_token_dict[self.anchor_gene]] except KeyError: - logger.error(f"Anchor gene {self.anchor_gene} not in token dictionary.") + logger.error( + f"Anchor gene {self.anchor_gene} not in token dictionary." + ) raise if self.genes_to_perturb == "all": self.tokens_to_perturb = "all" else: - missing_genes = [ - gene - for gene in self.genes_to_perturb - if gene not in self.gene_token_dict.keys() - ] + missing_genes = [gene for gene in self.genes_to_perturb if gene not in self.gene_token_dict.keys()] if len(missing_genes) == len(self.genes_to_perturb): logger.error( "None of the provided genes to perturb are in token dictionary." ) raise - elif len(missing_genes) > 0: + elif len(missing_genes)>0: logger.warning( - f"Genes to perturb {missing_genes} are not in token dictionary." - ) - self.tokens_to_perturb = [ - self.gene_token_dict.get(gene) for gene in self.genes_to_perturb - ] + f"Genes to perturb {missing_genes} are not in token dictionary.") + self.tokens_to_perturb = [self.gene_token_dict.get(gene) for gene in self.genes_to_perturb] def validate_options(self): # first disallow options under development if self.perturb_type in ["inhibit", "activate"]: logger.error( - "In silico inhibition and activation currently under development. " + "In silico inhibition and activation currently under development. " \ "Current valid options for 'perturb_type': 'delete' or 'overexpress'" ) raise - if (self.combos > 0) and (self.anchor_gene is None): - logger.error( - "Combination perturbation without anchor gene is currently under development. 
" - "Currently, must provide anchor gene for combination perturbation." - ) - raise - + # confirm arguments are within valid options and compatible with each other - for attr_name, valid_options in self.valid_option_dict.items(): + for attr_name,valid_options in self.valid_option_dict.items(): attr_value = self.__dict__[attr_name] if type(attr_value) not in {list, dict}: if attr_value in valid_options: @@ -307,1273 +791,507 @@ class InSilicoPerturber: continue valid_type = False for option in valid_options: - if (option in [bool, int, list, dict, str]) and isinstance( - attr_value, option - ): + if (option in [int,list,dict]) and isinstance(attr_value, option): valid_type = True break if valid_type: continue logger.error( - f"Invalid option for {attr_name}. " + f"Invalid option for {attr_name}. " \ f"Valid options for {attr_name}: {valid_options}" ) raise - - if self.perturb_type in ["delete", "overexpress"]: + + if self.perturb_type in ["delete","overexpress"]: if self.perturb_rank_shift is not None: if self.perturb_type == "delete": logger.warning( - "perturb_rank_shift set to None. " - "If perturb type is delete then gene is deleted entirely " - "rather than shifted by quartile" - ) + "perturb_rank_shift set to None. " \ + "If perturb type is delete then gene is deleted entirely " \ + "rather than shifted by quartile") elif self.perturb_type == "overexpress": logger.warning( - "perturb_rank_shift set to None. " - "If perturb type is overexpress then gene is moved to front " - "of rank value encoding rather than shifted by quartile" - ) + "perturb_rank_shift set to None. " \ + "If perturb type is overexpress then gene is moved to front " \ + "of rank value encoding rather than shifted by quartile") self.perturb_rank_shift = None - + if (self.anchor_gene is not None) and (self.emb_mode == "cell_and_gene"): self.emb_mode = "cell" logger.warning( - "emb_mode set to 'cell'. " - "Currently, analysis with anchor gene " - "only outputs effect on cell embeddings." 
- ) - + "emb_mode set to 'cell'. " \ + "Currently, analysis with anchor gene " \ + "only outputs effect on cell embeddings.") + if self.cell_states_to_model is not None: - pu.validate_cell_states_to_model(self.cell_states_to_model) - - if self.anchor_gene is not None: - self.anchor_gene = None + if len(self.cell_states_to_model.items()) == 1: logger.warning( - "anchor_gene set to None. " - "Currently, anchor gene not available " - "when modeling multiple cell states." + "The single value dictionary for cell_states_to_model will be " \ + "replaced with a dictionary with named keys for start, goal, and alternate states. " \ + "Please specify state_key, start_state, goal_state, and alt_states " \ + "in the cell_states_to_model dictionary for future use. " \ + "For example, cell_states_to_model={" \ + "'state_key': 'disease', " \ + "'start_state': 'dcm', " \ + "'goal_state': 'nf', " \ + "'alt_states': ['hcm', 'other1', 'other2']}" ) - - if self.state_embs_dict is None: - logger.error( - "state_embs_dict must be provided for mode with cell_states_to_model. " - "Format is dictionary with keys specifying each possible cell state to model. " - "Values are target embedding positions as torch.tensor." 
- ) - raise - - for state_emb in self.state_embs_dict.values(): - if not torch.is_tensor(state_emb): + for key,value in self.cell_states_to_model.items(): + if (len(value) == 3) and isinstance(value, tuple): + if isinstance(value[0],list) and isinstance(value[1],list) and isinstance(value[2],list): + if len(value[0]) == 1 and len(value[1]) == 1: + all_values = value[0]+value[1]+value[2] + if len(all_values) == len(set(all_values)): + continue + # reformat to the new named key format + state_values = flatten_list(list(self.cell_states_to_model.values())) + self.cell_states_to_model = { + "state_key": list(self.cell_states_to_model.keys())[0], + "start_state": state_values[0][0], + "goal_state": state_values[1][0], + "alt_states": state_values[2:][0] + } + elif set(self.cell_states_to_model.keys()) == {"state_key", "start_state", "goal_state", "alt_states"}: + if (self.cell_states_to_model["state_key"] is None) \ + or (self.cell_states_to_model["start_state"] is None) \ + or (self.cell_states_to_model["goal_state"] is None): logger.error( - "state_embs_dict must be dictionary with values being torch.tensor." - ) + "Please specify 'state_key', 'start_state', and 'goal_state' in cell_states_to_model.") raise + + if self.cell_states_to_model["start_state"] == self.cell_states_to_model["goal_state"]: + logger.error( + "All states must be unique.") + raise + + if self.cell_states_to_model["alt_states"] is not None: + if type(self.cell_states_to_model["alt_states"]) is not list: + logger.error( + "self.cell_states_to_model['alt_states'] must be a list (even if it is one element)." 
+ ) + raise + if len(self.cell_states_to_model["alt_states"])!= len(set(self.cell_states_to_model["alt_states"])): + logger.error( + "All states must be unique.") + raise - keys_absent = [] - for k, v in self.cell_states_to_model.items(): - if (k == "start_state") or (k == "goal_state"): - if v not in self.state_embs_dict.keys(): - keys_absent.append(v) - if k == "alt_states": - for state in v: - if state not in self.state_embs_dict.keys(): - keys_absent.append(state) - if len(keys_absent) > 0: + else: logger.error( - "Each start_state, goal_state, and alt_states in cell_states_to_model " - "must be a key in state_embs_dict with the value being " - "the state's embedding position as torch.tensor. " - f"Missing keys: {keys_absent}" + "cell_states_to_model must only have the following four keys: " \ + "'state_key', 'start_state', 'goal_state', 'alt_states'." \ + "For example, cell_states_to_model={" \ + "'state_key': 'disease', " \ + "'start_state': 'dcm', " \ + "'goal_state': 'nf', " \ + "'alt_states': ['hcm', 'other1', 'other2']}" ) raise - if self.perturb_type in ["inhibit", "activate"]: + if self.anchor_gene is not None: + self.anchor_gene = None + logger.warning( + "anchor_gene set to None. " \ + "Currently, anchor gene not available " \ + "when modeling multiple cell states.") + + if self.perturb_type in ["inhibit","activate"]: if self.perturb_rank_shift is None: logger.error( - "If perturb_type is inhibit or activate then " - "quartile to shift by must be specified." - ) + "If perturb_type is inhibit or activate then " \ + "quartile to shift by must be specified.") raise - + if self.filter_data is not None: - for key, value in self.filter_data.items(): - if not isinstance(value, list): + for key,value in self.filter_data.items(): + if type(value) != list: self.filter_data[key] = [value] logger.warning( - "Values in filter_data dict must be lists. " - f"Changing {key} value to list ([{value}])." - ) - + "Values in filter_data dict must be lists. 
" \ + f"Changing {key} value to list ([{value}]).") + if self.cell_inds_to_perturb != "all": if set(self.cell_inds_to_perturb.keys()) != {"start", "end"}: logger.error( "If cell_inds_to_perturb is a dictionary, keys must be 'start' and 'end'." ) raise - if ( - self.cell_inds_to_perturb["start"] < 0 - or self.cell_inds_to_perturb["end"] < 0 - ): - logger.error("cell_inds_to_perturb must be positive.") + if self.cell_inds_to_perturb["start"] < 0 or self.cell_inds_to_perturb["end"] < 0: + logger.error( + 'cell_inds_to_perturb must be positive.' + ) raise - def perturb_data( - self, model_directory, input_data_file, output_directory, output_prefix - ): + def perturb_data(self, + model_directory, + input_data_file, + output_directory, + output_prefix): """ Perturb genes in input data and save as results in output_directory. - **Parameters:** - + Parameters + ---------- model_directory : Path - | Path to directory containing model + Path to directory containing model input_data_file : Path - | Path to directory containing .dataset inputs + Path to directory containing .dataset inputs output_directory : Path - | Path to directory where perturbation data will be saved as batched pickle files + Path to directory where perturbation data will be saved as batched pickle files output_prefix : str - | Prefix for output files + Prefix for output files """ - ### format output path ### - output_path_prefix = os.path.join( - output_directory, f"in_silico_{self.perturb_type}_{output_prefix}" - ) - - ### load model and define parameters ### - model = pu.load_model( - self.model_type, self.num_classes, model_directory, mode="eval" - ) - self.max_len = pu.get_model_input_size(model) - layer_to_quant = pu.quant_layers(model) + self.emb_layer - - ### filter input data ### - # general filtering of input data based on filter_data argument - filtered_input_data = pu.load_and_filter( - self.filter_data, self.nproc, input_data_file - ) - - # Ensure emb_mode is cls if first token of the 
filtered input data is cls token - if self.special_token: - if (filtered_input_data["input_ids"][0][0] == self.cls_token_id) and ( - "cls" not in self.emb_mode - ): - logger.error( - "Emb mode 'cls' or 'cls_and_gene' required when first token is ." - ) - raise - if "cls" in self.emb_mode: - if (filtered_input_data["input_ids"][0][0] != self.cls_token_id) or ( - filtered_input_data["input_ids"][0][-1] != self.eos_token_id - ): + filtered_input_data = load_and_filter(self.filter_data, self.nproc, input_data_file) + model = load_model(self.model_type, self.num_classes, model_directory) + layer_to_quant = quant_layers(model)+self.emb_layer + + if self.cell_states_to_model is None: + state_embs_dict = None + else: + # confirm that all states are valid to prevent futile filtering + state_name = self.cell_states_to_model["state_key"] + state_values = filtered_input_data[state_name] + for value in get_possible_states(self.cell_states_to_model): + if value not in state_values: logger.error( - "Emb mode 'cls' and 'cls_and_gene' require that first token is and last token is ." 
- ) + f"{value} is not present in the dataset's {state_name} attribute.") raise - - filtered_input_data = self.apply_additional_filters(filtered_input_data) - - if self.perturb_group is True: - if (self.special_token) and ("cls" in self.emb_mode): - self.isp_perturb_set_special( - model, filtered_input_data, layer_to_quant, output_path_prefix - ) - else: - self.isp_perturb_set( - model, filtered_input_data, layer_to_quant, output_path_prefix - ) - else: - if (self.special_token) and ("cls" in self.emb_mode): - self.isp_perturb_all_special( - model, filtered_input_data, layer_to_quant, output_path_prefix - ) + # get dictionary of average cell state embeddings for comparison + downsampled_data = downsample_and_sort(filtered_input_data, self.max_ncells) + state_embs_dict = get_cell_state_avg_embs(model, + downsampled_data, + self.cell_states_to_model, + layer_to_quant, + self.pad_token_id, + self.forward_batch_size, + self.nproc) + # filter for start state cells + start_state = self.cell_states_to_model["start_state"] + def filter_for_origin(example): + return example[state_name] in [start_state] + + filtered_input_data = filtered_input_data.filter(filter_for_origin, num_proc=self.nproc) + + self.in_silico_perturb(model, + filtered_input_data, + layer_to_quant, + state_embs_dict, + output_directory, + output_prefix) + + # determine effect of perturbation on other genes + def in_silico_perturb(self, + model, + filtered_input_data, + layer_to_quant, + state_embs_dict, + output_directory, + output_prefix): + + output_path_prefix = f"{output_directory}in_silico_{self.perturb_type}_{output_prefix}_dict_1Kbatch" + model_input_size = get_model_input_size(model) + + # filter dataset for cells that have tokens to be perturbed + if self.anchor_token is not None: + def if_has_tokens_to_perturb(example): + return (len(set(example["input_ids"]).intersection(self.anchor_token))==len(self.anchor_token)) + filtered_input_data = filtered_input_data.filter(if_has_tokens_to_perturb, 
num_proc=self.nproc) + if len(filtered_input_data) == 0: + logger.error( + "No cells in dataset contain anchor gene.") + raise else: - self.isp_perturb_all( - model, filtered_input_data, layer_to_quant, output_path_prefix - ) - - def apply_additional_filters(self, filtered_input_data): - # additional filtering of input data dependent on isp mode - if self.cell_states_to_model is not None: - # filter for cells with start_state and log result - filtered_input_data = pu.filter_data_by_start_state( - filtered_input_data, self.cell_states_to_model, self.nproc - ) - + logger.info(f"# cells with anchor gene: {len(filtered_input_data)}") + if (self.tokens_to_perturb != "all") and (self.perturb_type != "overexpress"): - # filter for cells with tokens_to_perturb and log result - filtered_input_data = pu.filter_data_by_tokens_and_log( - filtered_input_data, - self.tokens_to_perturb, - self.nproc, - "genes_to_perturb", - ) - - if self.anchor_token is not None: - # filter for cells with anchor gene and log result - filtered_input_data = pu.filter_data_by_tokens_and_log( - filtered_input_data, self.anchor_token, self.nproc, "anchor_gene" - ) - - # downsample and sort largest to smallest to encounter memory constraints earlier - filtered_input_data = pu.downsample_and_sort( - filtered_input_data, self.max_ncells - ) - - # slice dataset if cells_inds_to_perturb is not "all" + # minimum # genes needed for perturbation test + min_genes = len(self.tokens_to_perturb) + + def if_has_tokens_to_perturb(example): + return (len(set(example["input_ids"]).intersection(self.tokens_to_perturb))>=min_genes) + filtered_input_data = filtered_input_data.filter(if_has_tokens_to_perturb, num_proc=self.nproc) + if len(filtered_input_data) == 0: + logger.error( + "No cells in dataset contain all genes to perturb as a group.") + raise + + cos_sims_dict = defaultdict(list) + pickle_batch = -1 + filtered_input_data = downsample_and_sort(filtered_input_data, self.max_ncells) if self.cell_inds_to_perturb 
!= "all": - filtered_input_data = pu.slice_by_inds_to_perturb( - filtered_input_data, self.cell_inds_to_perturb - ) - - return filtered_input_data - - def isp_perturb_set( - self, - model, - filtered_input_data: Dataset, - layer_to_quant: int, - output_path_prefix: str, - ): - def make_group_perturbation_batch(example): - example_input_ids = example["input_ids"] - example["tokens_to_perturb"] = self.tokens_to_perturb - indices_to_perturb = [ - example_input_ids.index(token) if token in example_input_ids else None - for token in self.tokens_to_perturb - ] - indices_to_perturb = [ - item for item in indices_to_perturb if item is not None - ] - if len(indices_to_perturb) > 0: - example["perturb_index"] = indices_to_perturb - else: - # -100 indicates tokens to overexpress are not present in rank value encoding - example["perturb_index"] = [-100] - if self.perturb_type == "delete": - example = pu.delete_indices(example) - elif self.perturb_type == "overexpress": - example = pu.overexpress_tokens( - example, self.max_len, self.special_token - ) - example["n_overflow"] = pu.calc_n_overflow( - self.max_len, - example["length"], - self.tokens_to_perturb, - indices_to_perturb, - ) - return example - - total_batch_length = len(filtered_input_data) - if self.cell_states_to_model is None: - cos_sims_dict = defaultdict(list) - else: - cos_sims_dict = { - state: defaultdict(list) - for state in pu.get_possible_states(self.cell_states_to_model) - } - - perturbed_data = filtered_input_data.map( - make_group_perturbation_batch, num_proc=self.nproc - ) - - if self.perturb_type == "overexpress": - filtered_input_data = filtered_input_data.add_column( - "n_overflow", perturbed_data["n_overflow"] - ) - # remove overflow genes from original data so that embeddings are comparable - # i.e. if original cell has genes 0:2047 and you want to overexpress new gene 2048, - # then the perturbed cell will be 2048+0:2046 so we compare it to an original cell 0:2046. 
- # (otherwise we will be modeling the effect of both deleting 2047 and adding 2048, - # rather than only adding 2048) - filtered_input_data = filtered_input_data.map( - pu.truncate_by_n_overflow, num_proc=self.nproc - ) - - if self.emb_mode == "cell_and_gene": - stored_gene_embs_dict = defaultdict(list) - - # iterate through batches - for i in trange(0, total_batch_length, self.forward_batch_size): - max_range = min(i + self.forward_batch_size, total_batch_length) - inds_select = [i for i in range(i, max_range)] - - minibatch = filtered_input_data.select(inds_select) - perturbation_batch = perturbed_data.select(inds_select) - - if self.cell_emb_style == "mean_pool": - full_original_emb = get_embs( - model, - minibatch, - "gene", - layer_to_quant, - self.pad_token_id, - self.forward_batch_size, - token_gene_dict=self.token_gene_dict, - summary_stat=None, - silent=True, - ) - indices_to_perturb = perturbation_batch["perturb_index"] - # remove indices that were perturbed - original_emb = pu.remove_perturbed_indices_set( - full_original_emb, - self.perturb_type, - indices_to_perturb, - self.tokens_to_perturb, - minibatch["length"], - ) - full_perturbation_emb = get_embs( - model, - perturbation_batch, - "gene", - layer_to_quant, - self.pad_token_id, - self.forward_batch_size, - token_gene_dict=self.token_gene_dict, - summary_stat=None, - silent=True, - ) - - # remove overexpressed genes - if self.perturb_type == "overexpress": - perturbation_emb = full_perturbation_emb[ - :, len(self.tokens_to_perturb) :, : - ] - - elif self.perturb_type == "delete": - perturbation_emb = full_perturbation_emb[ - :, : max(perturbation_batch["length"]), : - ] - - n_perturbation_genes = perturbation_emb.size()[1] - - # if no goal states, the cosine similarties are the mean of gene cosine similarities - if ( - self.cell_states_to_model is None - or self.emb_mode == "cell_and_gene" - ): - gene_cos_sims = pu.quant_cos_sims( - perturbation_emb, - original_emb, - self.cell_states_to_model, - 
self.state_embs_dict, - emb_mode="gene", - ) - - # if there are goal states, the cosine similarities are the cell cosine similarities - if self.cell_states_to_model is not None: - original_cell_emb = pu.mean_nonpadding_embs( - full_original_emb, - torch.tensor(minibatch["length"], device="cuda"), - dim=1, - ) - perturbation_cell_emb = pu.mean_nonpadding_embs( - full_perturbation_emb, - torch.tensor(perturbation_batch["length"], device="cuda"), - dim=1, - ) - cell_cos_sims = pu.quant_cos_sims( - perturbation_cell_emb, - original_cell_emb, - self.cell_states_to_model, - self.state_embs_dict, - emb_mode="cell", - ) - - # get cosine similarities in gene embeddings - # if getting gene embeddings, need gene names - if self.emb_mode == "cell_and_gene": - gene_list = minibatch["input_ids"] - # need to truncate gene_list - gene_list = [ - [g for g in genes if g not in self.tokens_to_perturb][ - :n_perturbation_genes - ] - for genes in gene_list - ] - - for cell_i, genes in enumerate(gene_list): - for gene_j, affected_gene in enumerate(genes): - if len(self.genes_to_perturb) > 1: - tokens_to_perturb = tuple(self.tokens_to_perturb) - else: - tokens_to_perturb = self.tokens_to_perturb[0] - - # fill in the gene cosine similarities - try: - stored_gene_embs_dict[ - (tokens_to_perturb, affected_gene) - ].append(gene_cos_sims[cell_i, gene_j].item()) - except KeyError: - stored_gene_embs_dict[ - (tokens_to_perturb, affected_gene) - ] = gene_cos_sims[cell_i, gene_j].item() + if self.cell_inds_to_perturb["start"] >= len(filtered_input_data): + logger.error("cell_inds_to_perturb['start'] is larger than the filtered dataset.") + raise + if self.cell_inds_to_perturb["end"] > len(filtered_input_data): + logger.warning("cell_inds_to_perturb['end'] is larger than the filtered dataset. 
\ + Setting to the end of the filtered dataset.") + self.cell_inds_to_perturb["end"] = len(filtered_input_data) + filtered_input_data = filtered_input_data.select([i for i in range(self.cell_inds_to_perturb["start"], self.cell_inds_to_perturb["end"])]) + + # make perturbation batch w/ single perturbation in multiple cells + if self.perturb_group == True: + + def make_group_perturbation_batch(example): + example_input_ids = example["input_ids"] + example["tokens_to_perturb"] = self.tokens_to_perturb + indices_to_perturb = [example_input_ids.index(token) if token in example_input_ids else None for token in self.tokens_to_perturb] + indices_to_perturb = [item for item in indices_to_perturb if item is not None] + if len(indices_to_perturb) > 0: + example["perturb_index"] = indices_to_perturb else: - gene_list = None - + # -100 indicates tokens to overexpress are not present in rank value encoding + example["perturb_index"] = [-100] + if self.perturb_type == "delete": + example = delete_indices(example) + elif self.perturb_type == "overexpress": + example = overexpress_tokens(example) + return example + + perturbation_batch = filtered_input_data.map(make_group_perturbation_batch, num_proc=self.nproc) + indices_to_perturb = perturbation_batch["perturb_index"] + + cos_sims_data = quant_cos_sims(model, + self.perturb_type, + perturbation_batch, + self.forward_batch_size, + layer_to_quant, + filtered_input_data, + self.tokens_to_perturb, + indices_to_perturb, + self.perturb_group, + self.cell_states_to_model, + state_embs_dict, + self.pad_token_id, + model_input_size, + self.nproc) + + perturbed_genes = tuple(self.tokens_to_perturb) + original_lengths = filtered_input_data["length"] if self.cell_states_to_model is None: - # calculate the mean of the gene cosine similarities for cell shift - # tensor of nonpadding lengths for each cell - if self.perturb_type == "overexpress": - # subtract number of genes that were overexpressed - # since they are removed before getting cos 
sims - n_overexpressed = len(self.tokens_to_perturb) - nonpadding_lens = [ - x - n_overexpressed for x in perturbation_batch["length"] - ] - else: - nonpadding_lens = perturbation_batch["length"] - cos_sims_data = pu.mean_nonpadding_embs( - gene_cos_sims, torch.tensor(nonpadding_lens, device="cuda") - ) - cos_sims_dict = self.update_perturbation_dictionary( - cos_sims_dict, - cos_sims_data, - gene_list, - ) - else: - cos_sims_data = cell_cos_sims - for state in cos_sims_dict.keys(): - cos_sims_dict[state] = self.update_perturbation_dictionary( - cos_sims_dict[state], - cos_sims_data[state], - gene_list, - ) - del minibatch - del perturbation_batch - del original_emb - del perturbation_emb - del cos_sims_data - - torch.cuda.empty_cache() + # update cos sims dict + # key is tuple of (perturbed_gene, affected_gene) + # or (perturbed_genes, "cell_emb") for avg cell emb change + cos_sims_data = cos_sims_data.to("cuda") + max_padded_len = cos_sims_data.shape[1] + for j in range(cos_sims_data.shape[0]): + # remove padding before mean pooling cell embedding + original_length = original_lengths[j] + gene_list = filtered_input_data[j]["input_ids"] + indices_removed = indices_to_perturb[j] + padding_to_remove = max_padded_len - (original_length \ + - len(self.tokens_to_perturb) \ + - len(indices_removed)) + nonpadding_cos_sims_data = cos_sims_data[j][:-padding_to_remove] + cell_cos_sim = torch.mean(nonpadding_cos_sims_data).item() + cos_sims_dict[(perturbed_genes, "cell_emb")] += [cell_cos_sim] - pu.write_perturbation_dictionary( - cos_sims_dict, - f"{output_path_prefix}_cell_embs_dict_{self.tokens_to_perturb}", - ) - - if self.emb_mode == "cell_and_gene": - pu.write_perturbation_dictionary( - stored_gene_embs_dict, - f"{output_path_prefix}_gene_embs_dict_{self.tokens_to_perturb}", - ) - - def isp_perturb_set_special( - self, - model, - filtered_input_data: Dataset, - layer_to_quant: int, - output_path_prefix: str, - ): - def make_group_perturbation_batch(example): - 
example_input_ids = example["input_ids"] - example["tokens_to_perturb"] = self.tokens_to_perturb - indices_to_perturb = [ - example_input_ids.index(token) if token in example_input_ids else None - for token in self.tokens_to_perturb - ] - indices_to_perturb = [ - item for item in indices_to_perturb if item is not None - ] - if len(indices_to_perturb) > 0: - example["perturb_index"] = indices_to_perturb + if self.emb_mode == "cell_and_gene": + for k in range(cos_sims_data.shape[1]): + cos_sim_value = nonpadding_cos_sims_data[k] + affected_gene = gene_list[k].item() + cos_sims_dict[(perturbed_genes, affected_gene)] += [cos_sim_value.item()] else: - # -100 indicates tokens to overexpress are not present in rank value encoding - example["perturb_index"] = [-100] - if self.perturb_type == "delete": - example = pu.delete_indices(example) - elif self.perturb_type == "overexpress": - example = pu.overexpress_tokens( - example, self.max_len, self.special_token - ) - example["n_overflow"] = pu.calc_n_overflow( - self.max_len, - example["length"], - self.tokens_to_perturb, - indices_to_perturb, - ) - return example - - total_batch_length = len(filtered_input_data) - - - if self.cell_states_to_model is None: - cos_sims_dict = defaultdict(list) - else: - cos_sims_dict = { - state: defaultdict(list) - for state in pu.get_possible_states(self.cell_states_to_model) - } - - perturbed_data = filtered_input_data.map( - make_group_perturbation_batch, num_proc=self.nproc - ) - - if self.perturb_type == "overexpress": - filtered_input_data = filtered_input_data.add_column( - "n_overflow", perturbed_data["n_overflow"] - ) - filtered_input_data = filtered_input_data.map( - pu.truncate_by_n_overflow_special, num_proc=self.nproc - ) - - if self.emb_mode == "cls_and_gene": - stored_gene_embs_dict = defaultdict(list) - - # iterate through batches - for i in trange(0, total_batch_length, self.forward_batch_size): - max_range = min(i + self.forward_batch_size, total_batch_length) - inds_select 
= [i for i in range(i, max_range)] - - minibatch = filtered_input_data.select(inds_select) - perturbation_batch = perturbed_data.select(inds_select) - - ##### CLS Embedding Mode ##### - if self.emb_mode == "cls": - indices_to_perturb = perturbation_batch["perturb_index"] - - original_cls_emb = get_embs( - model, - minibatch, - "cls", - layer_to_quant, - self.pad_token_id, - self.forward_batch_size, - token_gene_dict=self.token_gene_dict, - summary_stat=None, - silent=True, - ) - - perturbation_cls_emb = get_embs( - model, - perturbation_batch, - "cls", - layer_to_quant, - self.pad_token_id, - self.forward_batch_size, - token_gene_dict=self.token_gene_dict, - summary_stat=None, - silent=True, - ) - - # Calculate the cosine similarities - cls_cos_sims = pu.quant_cos_sims( - perturbation_cls_emb, - original_cls_emb, - self.cell_states_to_model, - self.state_embs_dict, - emb_mode="cell", - ) - - # Update perturbation dictionary - if self.cell_states_to_model is None: - cos_sims_dict = self.update_perturbation_dictionary( - cos_sims_dict, - cls_cos_sims, - gene_list=None, - ) - else: - for state in cos_sims_dict.keys(): - cos_sims_dict[state] = self.update_perturbation_dictionary( - cos_sims_dict[state], - cls_cos_sims[state], - gene_list=None, - ) - - ##### CLS and Gene Embedding Mode ##### - elif self.emb_mode == "cls_and_gene": - full_original_emb = get_embs( - model, - minibatch, - "gene", - layer_to_quant, - self.pad_token_id, - self.forward_batch_size, - self.token_gene_dict, - summary_stat=None, - silent=True, - ) - indices_to_perturb = perturbation_batch["perturb_index"] - - # remove indices that were perturbed - original_emb = pu.remove_perturbed_indices_set( - full_original_emb, - self.perturb_type, - indices_to_perturb, - self.tokens_to_perturb, - minibatch["length"], - ) - - full_perturbation_emb = get_embs( - model, - perturbation_batch, - "gene", - layer_to_quant, - self.pad_token_id, - self.forward_batch_size, - self.token_gene_dict, - summary_stat=None, 
- silent=True, - ) - - # remove special tokens and padding - original_emb = original_emb[:, 1:-1, :] - if self.perturb_type == "overexpress": - perturbation_emb = full_perturbation_emb[ - :, 1 + len(self.tokens_to_perturb) : -1, : - ] - elif self.perturb_type == "delete": - perturbation_emb = full_perturbation_emb[ - :, 1 : max(perturbation_batch["length"]) - 1, : - ] - - n_perturbation_genes = perturbation_emb.size()[1] - - # truncate the original embedding as necessary - if self.perturb_type == "overexpress": - def calc_perturbation_length(ids): - if ids == [-100]: - return 0 + # update cos sims dict + # key is tuple of (perturbed_genes, "cell_emb") + # value is list of tuples of cos sims for cell_states_to_model + origin_state_key = self.cell_states_to_model["start_state"] + cos_sims_origin = cos_sims_data[origin_state_key] + for j in range(cos_sims_origin.shape[0]): + data_list = [] + for data in list(cos_sims_data.values()): + data_item = data.to("cuda") + data_list += [data_item[j].item()] + cos_sims_dict[(perturbed_genes, "cell_emb")] += [tuple(data_list)] + + with open(f"{output_path_prefix}_raw.pickle", "wb") as fp: + pickle.dump(cos_sims_dict, fp) + + # make perturbation batch w/ multiple perturbations in single cell + if self.perturb_group == False: + + for i in trange(len(filtered_input_data)): + example_cell = filtered_input_data.select([i]) + original_emb = forward_pass_single_cell(model, example_cell, layer_to_quant) + gene_list = torch.squeeze(example_cell["input_ids"]) + + # reset to original type to prevent downstream issues due to forward_pass_single_cell modifying as torch format in place + example_cell = filtered_input_data.select([i]) + + if self.anchor_token is None: + for combo_lvl in range(self.combos+1): + perturbation_batch, indices_to_perturb = make_perturbation_batch(example_cell, + self.perturb_type, + self.tokens_to_perturb, + self.anchor_token, + combo_lvl, + self.nproc) + cos_sims_data = quant_cos_sims(model, + self.perturb_type, + 
perturbation_batch, + self.forward_batch_size, + layer_to_quant, + original_emb, + self.tokens_to_perturb, + indices_to_perturb, + self.perturb_group, + self.cell_states_to_model, + state_embs_dict, + self.pad_token_id, + model_input_size, + self.nproc) + + if self.cell_states_to_model is None: + # update cos sims dict + # key is tuple of (perturbed_gene, affected_gene) + # or (perturbed_gene, "cell_emb") for avg cell emb change + cos_sims_data = cos_sims_data.to("cuda") + for j in range(cos_sims_data.shape[0]): + if self.tokens_to_perturb != "all": + j_index = torch.tensor(indices_to_perturb[j]) + if j_index.shape[0]>1: + j_index = torch.squeeze(j_index) + else: + j_index = torch.tensor([j]) + perturbed_gene = torch.index_select(gene_list, 0, j_index) + + if perturbed_gene.shape[0]==1: + perturbed_gene = perturbed_gene.item() + elif perturbed_gene.shape[0]>1: + perturbed_gene = tuple(perturbed_gene.tolist()) + + cell_cos_sim = torch.mean(cos_sims_data[j]).item() + cos_sims_dict[(perturbed_gene, "cell_emb")] += [cell_cos_sim] + + # not_j_index = list(set(i for i in range(gene_list.shape[0])).difference(j_index)) + # gene_list_j = torch.index_select(gene_list, 0, j_index) + if self.emb_mode == "cell_and_gene": + for k in range(cos_sims_data.shape[1]): + cos_sim_value = cos_sims_data[j][k] + affected_gene = gene_list[k].item() + cos_sims_dict[(perturbed_gene, affected_gene)] += [cos_sim_value.item()] else: - return len(ids) - - max_tensor_size = max([length - calc_perturbation_length(ids) - 2 for length, ids in zip(minibatch["length"], indices_to_perturb)]) - - max_n_overflow = max(minibatch["n_overflow"]) - if max_n_overflow > 0 and perturbation_emb.size()[1] < original_emb.size()[1]: - original_emb = original_emb[:, 0 : perturbation_emb.size()[1], :] - elif perturbation_emb.size()[1] < original_emb.size()[1]: - original_emb = original_emb[:, 0:max_tensor_size, :] - - gene_cos_sims = pu.quant_cos_sims( - perturbation_emb, - original_emb, - self.cell_states_to_model, 
- self.state_embs_dict, - emb_mode="gene", - ) - - # get cls emb - original_cls_emb = full_original_emb[:, 0, :] - perturbation_cls_emb = full_perturbation_emb[:, 0, :] - - cls_cos_sims = pu.quant_cos_sims( - perturbation_cls_emb, - original_cls_emb, - self.cell_states_to_model, - self.state_embs_dict, - emb_mode="cell", - ) - - # get cosine similarities in gene embeddings - # since getting gene embeddings, need gene names - - gene_list = minibatch["input_ids"] - # need to truncate gene_list - genes_to_exclude = self.tokens_to_perturb + [ - self.cls_token_id, - self.eos_token_id, - ] - gene_list = [ - [g for g in genes if g not in genes_to_exclude][ - :n_perturbation_genes - ] - for genes in gene_list - ] - - for cell_i, genes in enumerate(gene_list): - for gene_j, affected_gene in enumerate(genes): - if len(self.genes_to_perturb) > 1: - tokens_to_perturb = tuple(self.tokens_to_perturb) + # update cos sims dict + # key is tuple of (perturbed_gene, "cell_emb") + # value is list of tuples of cos sims for cell_states_to_model + origin_state_key = self.cell_states_to_model["start_state"] + cos_sims_origin = cos_sims_data[origin_state_key] + + for j in range(cos_sims_origin.shape[0]): + if (self.tokens_to_perturb != "all") or (combo_lvl>0): + j_index = torch.tensor(indices_to_perturb[j]) + if j_index.shape[0]>1: + j_index = torch.squeeze(j_index) + else: + j_index = torch.tensor([j]) + perturbed_gene = torch.index_select(gene_list, 0, j_index) + + if perturbed_gene.shape[0]==1: + perturbed_gene = perturbed_gene.item() + elif perturbed_gene.shape[0]>1: + perturbed_gene = tuple(perturbed_gene.tolist()) + + data_list = [] + for data in list(cos_sims_data.values()): + data_item = data.to("cuda") + cell_data = torch.mean(data_item[j]).item() + data_list += [cell_data] + cos_sims_dict[(perturbed_gene, "cell_emb")] += [tuple(data_list)] + + elif self.anchor_token is not None: + perturbation_batch, indices_to_perturb = make_perturbation_batch(example_cell, + self.perturb_type, 
+ self.tokens_to_perturb, + None, # first run without anchor token to test individual gene perturbations + 0, + self.nproc) + cos_sims_data = quant_cos_sims(model, + self.perturb_type, + perturbation_batch, + self.forward_batch_size, + layer_to_quant, + original_emb, + self.tokens_to_perturb, + indices_to_perturb, + self.perturb_group, + self.cell_states_to_model, + state_embs_dict, + self.pad_token_id, + model_input_size, + self.nproc) + cos_sims_data = cos_sims_data.to("cuda") + + combo_perturbation_batch, combo_indices_to_perturb = make_perturbation_batch(example_cell, + self.perturb_type, + self.tokens_to_perturb, + self.anchor_token, + 1, + self.nproc) + combo_cos_sims_data = quant_cos_sims(model, + self.perturb_type, + combo_perturbation_batch, + self.forward_batch_size, + layer_to_quant, + original_emb, + self.tokens_to_perturb, + combo_indices_to_perturb, + self.perturb_group, + self.cell_states_to_model, + state_embs_dict, + self.pad_token_id, + model_input_size, + self.nproc) + combo_cos_sims_data = combo_cos_sims_data.to("cuda") + + # update cos sims dict + # key is tuple of (perturbed_gene, "cell_emb") for avg cell emb change + anchor_index = example_cell["input_ids"][0].index(self.anchor_token[0]) + anchor_cell_cos_sim = torch.mean(cos_sims_data[anchor_index]).item() + non_anchor_indices = [k for k in range(cos_sims_data.shape[0]) if k != anchor_index] + cos_sims_data = cos_sims_data[non_anchor_indices,:] + + for j in range(cos_sims_data.shape[0]): + + if j 1: - perturbed_genes = tuple(self.tokens_to_perturb) - else: - perturbed_genes = self.tokens_to_perturb[0] - - # if cell embeddings, can just append - # shape will be (batch size, 1) - cos_sims_data = torch.squeeze(cos_sims_data).tolist() - - # handle case of single cell left - if not isinstance(cos_sims_data, list): - cos_sims_data = [cos_sims_data] - - cos_sims_dict[(perturbed_genes, "cell_emb")] += cos_sims_data - - else: - for i, cos in enumerate(cos_sims_data.tolist()): - 
cos_sims_dict[(gene_list[i], "cell_emb")].append(cos) - - return cos_sims_dict + # save remainder cells + with open(f"{output_path_prefix}{pickle_batch}_raw.pickle", "wb") as fp: + pickle.dump(cos_sims_dict, fp) \ No newline at end of file diff --git a/geneformer/in_silico_perturber_stats.py b/geneformer/in_silico_perturber_stats.py index 9ec98a8caee4e4ca623c5ecc7c18c36210806cce..60e76c1aea01a8b39210d4a2f29dd1cc23f8592d 100644 --- a/geneformer/in_silico_perturber_stats.py +++ b/geneformer/in_silico_perturber_stats.py @@ -1,179 +1,104 @@ """ Geneformer in silico perturber stats generator. -**Usage:** - -.. code-block :: python - - >>> from geneformer import InSilicoPerturberStats - >>> ispstats = InSilicoPerturberStats(mode="goal_state_shift", - ... cell_states_to_model={"state_key": "disease", - ... "start_state": "dcm", - ... "goal_state": "nf", - ... "alt_states": ["hcm", "other1", "other2"]}) - >>> ispstats.get_stats("path/to/input_data", - ... None, - ... "path/to/output_directory", - ... "output_prefix") - -**Description:** - -| Aggregates data or calculates stats for in silico perturbations based on type of statistics specified in InSilicoPerturberStats. -| Input data is raw in silico perturbation results in the form of dictionaries outputted by ``in_silico_perturber``. 
- +Usage: + from geneformer import InSilicoPerturberStats + ispstats = InSilicoPerturberStats(mode="goal_state_shift", + combos=0, + anchor_gene=None, + cell_states_to_model={"state_key": "disease", + "start_state": "dcm", + "goal_state": "nf", + "alt_states": ["hcm", "other1", "other2"]}) + ispstats.get_stats("path/to/input_data", + None, + "path/to/output_directory", + "output_prefix") """ -import logging import os -import pickle -import random -from pathlib import Path - +import logging import numpy as np import pandas as pd +import pickle +import random import statsmodels.stats.multitest as smt +from pathlib import Path from scipy.stats import ranksums from sklearn.mixture import GaussianMixture -from tqdm.auto import tqdm, trange +from tqdm.notebook import trange, tqdm -from . import ENSEMBL_DICTIONARY_FILE, TOKEN_DICTIONARY_FILE -from .perturber_utils import flatten_list, validate_cell_states_to_model +from .in_silico_perturber import flatten_list -logger = logging.getLogger(__name__) +from .tokenizer import TOKEN_DICTIONARY_FILE + +GENE_NAME_ID_DICTIONARY_FILE = Path(__file__).parent / "gene_name_id_dict.pkl" +logger = logging.getLogger(__name__) # invert dictionary keys/values def invert_dict(dictionary): return {v: k for k, v in dictionary.items()} - -def read_dict(cos_sims_dict, cell_or_gene_emb, anchor_token): - if cell_or_gene_emb == "cell": - cell_emb_dict = { - k: v for k, v in cos_sims_dict.items() if v and "cell_emb" in k - } - return [cell_emb_dict] - elif cell_or_gene_emb == "gene": - if anchor_token is None: - gene_emb_dict = {k: v for k, v in cos_sims_dict.items() if v} - else: - gene_emb_dict = { - k: v for k, v in cos_sims_dict.items() if v and anchor_token == k[0] - } - return [gene_emb_dict] - - # read raw dictionary files -def read_dictionaries( - input_data_directory, - cell_or_gene_emb, - anchor_token, - cell_states_to_model, - pickle_suffix, -): - file_found = False +def read_dictionaries(input_data_directory, cell_or_gene_emb, 
anchor_token): + file_found = 0 file_path_list = [] - if cell_states_to_model is None: - dict_list = [] - else: - validate_cell_states_to_model(cell_states_to_model) - cell_states_to_model_valid = { - state: value - for state, value in cell_states_to_model.items() - if state != "state_key" - and cell_states_to_model[state] is not None - and cell_states_to_model[state] != [] - } - cell_states_list = [] - # flatten all state values into list - for state in cell_states_to_model_valid: - value = cell_states_to_model_valid[state] - if isinstance(value, list): - cell_states_list += value - else: - cell_states_list.append(value) - state_dict = {state_value: dict() for state_value in cell_states_list} + dict_list = [] for file in os.listdir(input_data_directory): - # process only files with given suffix (e.g. "_raw.pickle") - if file.endswith(pickle_suffix): - file_found = True + # process only _raw.pickle files + if file.endswith("_raw.pickle"): + file_found = 1 file_path_list += [f"{input_data_directory}/{file}"] for file_path in tqdm(file_path_list): with open(file_path, "rb") as fp: cos_sims_dict = pickle.load(fp) - if cell_states_to_model is None: - dict_list += read_dict(cos_sims_dict, cell_or_gene_emb, anchor_token) - else: - for state_value in cell_states_list: - new_dict = read_dict( - cos_sims_dict[state_value], cell_or_gene_emb, anchor_token - )[0] - for key in new_dict: - try: - state_dict[state_value][key] += new_dict[key] - except KeyError: - state_dict[state_value][key] = new_dict[key] - - if not file_found: + if cell_or_gene_emb == "cell": + cell_emb_dict = {k: v for k, + v in cos_sims_dict.items() if v and "cell_emb" in k} + dict_list += [cell_emb_dict] + elif cell_or_gene_emb == "gene": + gene_emb_dict = {k: v for k, + v in cos_sims_dict.items() if v and anchor_token == k[0]} + dict_list += [gene_emb_dict] + if file_found == 0: logger.error( - "No raw data for processing found within provided directory. 
" - "Please ensure data files end with '{pickle_suffix}'." - ) + "No raw data for processing found within provided directory. " \ + "Please ensure data files end with '_raw.pickle'.") raise - if cell_states_to_model is None: - return dict_list - else: - return state_dict - + return dict_list # get complete gene list -def get_gene_list(dict_list, mode): +def get_gene_list(dict_list,mode): if mode == "cell": position = 0 elif mode == "gene": position = 1 gene_set = set() - if isinstance(dict_list, list): - for dict_i in dict_list: - gene_set.update([k[position] for k, v in dict_i.items() if v]) - elif isinstance(dict_list, dict): - for state, dict_i in dict_list.items(): - gene_set.update([k[position] for k, v in dict_i.items() if v]) - else: - logger.error( - "dict_list should be a list, or if modeling shift to goal states, a dict. " - f"{type(dict_list)} is not the correct format." - ) - raise + for dict_i in dict_list: + gene_set.update([k[position] for k, v in dict_i.items() if v]) gene_list = list(gene_set) if mode == "gene": gene_list.remove("cell_emb") gene_list.sort() return gene_list - def token_tuple_to_ensembl_ids(token_tuple, gene_token_id_dict): - try: - return tuple([gene_token_id_dict.get(i, np.nan) for i in token_tuple]) - except TypeError: - return gene_token_id_dict.get(token_tuple, np.nan) - + return tuple([gene_token_id_dict.get(i, np.nan) for i in token_tuple]) def n_detections(token, dict_list, mode, anchor_token): cos_sim_megalist = [] for dict_i in dict_list: if mode == "cell": - cos_sim_megalist += dict_i.get((token, "cell_emb"), []) + cos_sim_megalist += dict_i.get((token, "cell_emb"),[]) elif mode == "gene": - cos_sim_megalist += dict_i.get((anchor_token, token), []) + cos_sim_megalist += dict_i.get((anchor_token, token),[]) return len(cos_sim_megalist) - def get_fdr(pvalues): return list(smt.multipletests(pvalues, alpha=0.05, method="fdr_bh")[1]) - def get_impact_component(test_value, gaussian_mixture_model): impact_border = 
gaussian_mixture_model.means_[0][0] nonimpact_border = gaussian_mixture_model.means_[1][0] @@ -189,392 +114,236 @@ def get_impact_component(test_value, gaussian_mixture_model): impact_component = 1 return impact_component - # aggregate data for single perturbation in multiple cells -def isp_aggregate_grouped_perturb(cos_sims_df, dict_list, genes_perturbed): - names = ["Cosine_sim", "Gene"] - cos_sims_full_dfs = [] - if isinstance(genes_perturbed, list): - if len(genes_perturbed) > 1: - gene_ids_df = cos_sims_df.loc[ - np.isin( - [set(idx) for idx in cos_sims_df["Ensembl_ID"]], - set(genes_perturbed), - ), - :, - ] - else: - gene_ids_df = cos_sims_df.loc[ - np.isin(cos_sims_df["Ensembl_ID"], genes_perturbed), : - ] - else: - logger.error( - "aggregate_data is for perturbation of single gene or single group of genes. genes_to_perturb should be formatted as list." - ) - raise - - if gene_ids_df.empty: - logger.error("genes_to_perturb not found in data.") - raise - - tokens = gene_ids_df["Gene"] - symbols = gene_ids_df["Gene_name"] - - for token, symbol in zip(tokens, symbols): - cos_shift_data = [] - for dict_i in dict_list: - cos_shift_data += dict_i.get((token, "cell_emb"), []) - - df = pd.DataFrame(columns=names) - df["Cosine_sim"] = cos_shift_data - df["Gene"] = symbol - cos_sims_full_dfs.append(df) - - return pd.concat(cos_sims_full_dfs) - - -def find(variable, x): - try: - if x in variable: # Test if variable is iterable and contains x - return True - elif x == variable: - return True - except (ValueError, TypeError): - return x == variable # Test if variable is x if non-iterable - - -def isp_aggregate_gene_shifts( - cos_sims_df, dict_list, gene_token_id_dict, gene_id_name_dict, token_dtype -): - cos_shift_data = dict() - for i in trange(cos_sims_df.shape[0]): - token = cos_sims_df["Gene"][i] - for dict_i in dict_list: - if token_dtype == "nontuple": - affected_pairs = [k for k, v in dict_i.items() if k[0] == token] - else: - affected_pairs = [k for k, v in 
dict_i.items() if find(k[0], token)] - for key in affected_pairs: - if key in cos_shift_data.keys(): - cos_shift_data[key] += dict_i.get(key, []) - else: - cos_shift_data[key] = dict_i.get(key, []) - - cos_data_mean = { - k: [np.mean(v), np.std(v), len(v)] for k, v in cos_shift_data.items() - } - cos_sims_full_df = pd.DataFrame() - cos_sims_full_df["Perturbed"] = [k[0] for k, v in cos_data_mean.items()] - cos_sims_full_df["Gene_name"] = [ - cos_sims_df[cos_sims_df["Gene"] == k[0]]["Gene_name"].item() - for k, v in cos_data_mean.items() - ] - cos_sims_full_df["Ensembl_ID"] = [ - cos_sims_df[cos_sims_df["Gene"] == k[0]]["Ensembl_ID"].item() - for k, v in cos_data_mean.items() - ] - - cos_sims_full_df["Affected"] = [k[1] for k, v in cos_data_mean.items()] - cos_sims_full_df["Affected_gene_name"] = [ - gene_id_name_dict.get(gene_token_id_dict.get(token, np.nan), np.nan) - for token in cos_sims_full_df["Affected"] - ] - cos_sims_full_df["Affected_Ensembl_ID"] = [ - gene_token_id_dict.get(token, np.nan) for token in cos_sims_full_df["Affected"] - ] - cos_sims_full_df["Cosine_sim_mean"] = [v[0] for k, v in cos_data_mean.items()] - cos_sims_full_df["Cosine_sim_stdev"] = [v[1] for k, v in cos_data_mean.items()] - cos_sims_full_df["N_Detections"] = [v[2] for k, v in cos_data_mean.items()] - - specific_val = "cell_emb" - cos_sims_full_df["temp"] = list(cos_sims_full_df["Affected"] == specific_val) - # reorder so cell embs are at the top and all are subordered by magnitude of cosine sim - cos_sims_full_df = cos_sims_full_df.sort_values( - by=(["temp", "Cosine_sim_mean"]), ascending=[False, True] - ).drop("temp", axis=1) - - return cos_sims_full_df +def isp_aggregate_grouped_perturb(cos_sims_df, dict_list): + names=["Cosine_shift"] + cos_sims_full_df = pd.DataFrame(columns=names) + cos_shift_data = [] + token = cos_sims_df["Gene"][0] + for dict_i in dict_list: + cos_shift_data += dict_i.get((token, "cell_emb"),[]) + cos_sims_full_df["Cosine_shift"] = cos_shift_data + return 
cos_sims_full_df # stats comparing cos sim shifts towards goal state of test perturbations vs random perturbations -def isp_stats_to_goal_state( - cos_sims_df, result_dict, cell_states_to_model, genes_perturbed -): - if ( - ("alt_states" not in cell_states_to_model.keys()) - or (len(cell_states_to_model["alt_states"]) == 0) - or (cell_states_to_model["alt_states"] == [None]) - ): +def isp_stats_to_goal_state(cos_sims_df, dict_list, cell_states_to_model, genes_perturbed): + cell_state_key = cell_states_to_model["start_state"] + if ("alt_states" not in cell_states_to_model.keys()) \ + or (len(cell_states_to_model["alt_states"]) == 0) \ + or (cell_states_to_model["alt_states"] == [None]): alt_end_state_exists = False - elif (len(cell_states_to_model["alt_states"]) > 0) and ( - cell_states_to_model["alt_states"] != [None] - ): + elif (len(cell_states_to_model["alt_states"]) > 0) and (cell_states_to_model["alt_states"] != [None]): alt_end_state_exists = True - + # for single perturbation in multiple cells, there are no random perturbations to compare to if genes_perturbed != "all": - cos_sims_full_df = pd.DataFrame() - - cos_shift_data_end = [] + names=["Shift_to_goal_end", + "Shift_to_alt_end"] + if alt_end_state_exists == False: + names.remove("Shift_to_alt_end") + cos_sims_full_df = pd.DataFrame(columns=names) + + cos_shift_data = [] token = cos_sims_df["Gene"][0] - cos_shift_data_end += result_dict[cell_states_to_model["goal_state"]].get( - (token, "cell_emb"), [] - ) - cos_sims_full_df["Shift_to_goal_end"] = [np.mean(cos_shift_data_end)] - if alt_end_state_exists is True: - for alt_state in cell_states_to_model["alt_states"]: - cos_shift_data_alt_state = [] - cos_shift_data_alt_state += result_dict.get(alt_state).get( - (token, "cell_emb"), [] - ) - cos_sims_full_df[f"Shift_to_alt_end_{alt_state}"] = [ - np.mean(cos_shift_data_alt_state) - ] - + for dict_i in dict_list: + cos_shift_data += dict_i.get((token, "cell_emb"),[]) + if alt_end_state_exists == False: + 
cos_sims_full_df["Shift_to_goal_end"] = [goal_end for start_state,goal_end in cos_shift_data] + if alt_end_state_exists == True: + cos_sims_full_df["Shift_to_goal_end"] = [goal_end for start_state,goal_end,alt_end in cos_shift_data] + cos_sims_full_df["Shift_to_alt_end"] = [alt_end for start_state,goal_end,alt_end in cos_shift_data] + # sort by shift to desired state - cos_sims_full_df = cos_sims_full_df.sort_values( - by=["Shift_to_goal_end"], ascending=[False] - ) - return cos_sims_full_df - + cos_sims_full_df = cos_sims_full_df.sort_values(by=["Shift_to_goal_end"], + ascending=[False]) + return cos_sims_full_df + elif genes_perturbed == "all": - goal_end_random_megalist = [] - if alt_end_state_exists is True: - alt_end_state_random_dict = { - alt_state: [] for alt_state in cell_states_to_model["alt_states"] - } + random_tuples = [] for i in trange(cos_sims_df.shape[0]): token = cos_sims_df["Gene"][i] - goal_end_random_megalist += result_dict[ - cell_states_to_model["goal_state"] - ].get((token, "cell_emb"), []) - if alt_end_state_exists is True: - for alt_state in cell_states_to_model["alt_states"]: - alt_end_state_random_dict[alt_state] += result_dict[alt_state].get( - (token, "cell_emb"), [] - ) + for dict_i in dict_list: + random_tuples += dict_i.get((token, "cell_emb"),[]) + + if alt_end_state_exists == False: + goal_end_random_megalist = [goal_end for start_state,goal_end in random_tuples] + elif alt_end_state_exists == True: + goal_end_random_megalist = [goal_end for start_state,goal_end,alt_end in random_tuples] + alt_end_random_megalist = [alt_end for start_state,goal_end,alt_end in random_tuples] # downsample to improve speed of ranksums if len(goal_end_random_megalist) > 100_000: random.seed(42) - goal_end_random_megalist = random.sample( - goal_end_random_megalist, k=100_000 - ) - if alt_end_state_exists is True: - for alt_state in cell_states_to_model["alt_states"]: - if len(alt_end_state_random_dict[alt_state]) > 100_000: - random.seed(42) - 
alt_end_state_random_dict[alt_state] = random.sample( - alt_end_state_random_dict[alt_state], k=100_000 - ) - - names = [ - "Gene", - "Gene_name", - "Ensembl_ID", - "Shift_to_goal_end", - "Goal_end_vs_random_pval", - ] - if alt_end_state_exists is True: - [ - names.append(f"Shift_to_alt_end_{alt_state}") - for alt_state in cell_states_to_model["alt_states"] - ] - names.append(names.pop(names.index("Goal_end_vs_random_pval"))) - [ - names.append(f"Alt_end_vs_random_pval_{alt_state}") - for alt_state in cell_states_to_model["alt_states"] - ] + goal_end_random_megalist = random.sample(goal_end_random_megalist, k=100_000) + if alt_end_state_exists == True: + if len(alt_end_random_megalist) > 100_000: + random.seed(42) + alt_end_random_megalist = random.sample(alt_end_random_megalist, k=100_000) + + names=["Gene", + "Gene_name", + "Ensembl_ID", + "Shift_to_goal_end", + "Shift_to_alt_end", + "Goal_end_vs_random_pval", + "Alt_end_vs_random_pval"] + if alt_end_state_exists == False: + names.remove("Shift_to_alt_end") + names.remove("Alt_end_vs_random_pval") cos_sims_full_df = pd.DataFrame(columns=names) - n_detections_dict = dict() for i in trange(cos_sims_df.shape[0]): token = cos_sims_df["Gene"][i] name = cos_sims_df["Gene_name"][i] ensembl_id = cos_sims_df["Ensembl_ID"][i] - goal_end_cos_sim_megalist = result_dict[ - cell_states_to_model["goal_state"] - ].get((token, "cell_emb"), []) - n_detections_dict[token] = len(goal_end_cos_sim_megalist) - mean_goal_end = np.mean(goal_end_cos_sim_megalist) - pval_goal_end = ranksums( - goal_end_random_megalist, goal_end_cos_sim_megalist - ).pvalue - - if alt_end_state_exists is True: - alt_end_state_dict = { - alt_state: [] for alt_state in cell_states_to_model["alt_states"] - } - for alt_state in cell_states_to_model["alt_states"]: - alt_end_state_dict[alt_state] = result_dict[alt_state].get( - (token, "cell_emb"), [] - ) - alt_end_state_dict[f"{alt_state}_mean"] = np.mean( - alt_end_state_dict[alt_state] - ) - 
alt_end_state_dict[f"{alt_state}_pval"] = ranksums( - alt_end_state_random_dict[alt_state], - alt_end_state_dict[alt_state], - ).pvalue + cos_shift_data = [] - results_dict = dict() - results_dict["Gene"] = token - results_dict["Gene_name"] = name - results_dict["Ensembl_ID"] = ensembl_id - results_dict["Shift_to_goal_end"] = mean_goal_end - results_dict["Goal_end_vs_random_pval"] = pval_goal_end - if alt_end_state_exists is True: - for alt_state in cell_states_to_model["alt_states"]: - results_dict[f"Shift_to_alt_end_{alt_state}"] = alt_end_state_dict[ - f"{alt_state}_mean" - ] - results_dict[ - f"Alt_end_vs_random_pval_{alt_state}" - ] = alt_end_state_dict[f"{alt_state}_pval"] + for dict_i in dict_list: + cos_shift_data += dict_i.get((token, "cell_emb"),[]) - cos_sims_df_i = pd.DataFrame(results_dict, index=[i]) - cos_sims_full_df = pd.concat([cos_sims_full_df, cos_sims_df_i]) + if alt_end_state_exists == False: + goal_end_cos_sim_megalist = [goal_end for start_state,goal_end in cos_shift_data] + elif alt_end_state_exists == True: + goal_end_cos_sim_megalist = [goal_end for start_state,goal_end,alt_end in cos_shift_data] + alt_end_cos_sim_megalist = [alt_end for start_state,goal_end,alt_end in cos_shift_data] + mean_alt_end = np.mean(alt_end_cos_sim_megalist) + pval_alt_end = ranksums(alt_end_random_megalist,alt_end_cos_sim_megalist).pvalue - cos_sims_full_df["Goal_end_FDR"] = get_fdr( - list(cos_sims_full_df["Goal_end_vs_random_pval"]) - ) - if alt_end_state_exists is True: - for alt_state in cell_states_to_model["alt_states"]: - cos_sims_full_df[f"Alt_end_FDR_{alt_state}"] = get_fdr( - list(cos_sims_full_df[f"Alt_end_vs_random_pval_{alt_state}"]) - ) + mean_goal_end = np.mean(goal_end_cos_sim_megalist) + pval_goal_end = ranksums(goal_end_random_megalist,goal_end_cos_sim_megalist).pvalue + + if alt_end_state_exists == False: + data_i = [token, + name, + ensembl_id, + mean_goal_end, + pval_goal_end] + elif alt_end_state_exists == True: + data_i = [token, + name, 
+ ensembl_id, + mean_goal_end, + mean_alt_end, + pval_goal_end, + pval_alt_end] + + cos_sims_df_i = pd.DataFrame(dict(zip(names,data_i)),index=[i]) + cos_sims_full_df = pd.concat([cos_sims_full_df,cos_sims_df_i]) + + cos_sims_full_df["Goal_end_FDR"] = get_fdr(list(cos_sims_full_df["Goal_end_vs_random_pval"])) + if alt_end_state_exists == True: + cos_sims_full_df["Alt_end_FDR"] = get_fdr(list(cos_sims_full_df["Alt_end_vs_random_pval"])) # quantify number of detections of each gene - cos_sims_full_df["N_Detections"] = [ - n_detections_dict[token] for token in cos_sims_full_df["Gene"] - ] - - # sort by shift to desired state - cos_sims_full_df["Sig"] = [ - 1 if fdr < 0.05 else 0 for fdr in cos_sims_full_df["Goal_end_FDR"] - ] - cos_sims_full_df = cos_sims_full_df.sort_values( - by=["Sig", "Shift_to_goal_end", "Goal_end_FDR"], - ascending=[False, False, True], - ) - + cos_sims_full_df["N_Detections"] = [n_detections(i, dict_list, "cell", None) for i in cos_sims_full_df["Gene"]] + + # sort by shift to desired state\ + cos_sims_full_df["Sig"] = [1 if fdr<0.05 else 0 for fdr in cos_sims_full_df["Goal_end_FDR"]] + cos_sims_full_df = cos_sims_full_df.sort_values(by=["Sig", + "Shift_to_goal_end", + "Goal_end_FDR"], + ascending=[False,False,True]) + return cos_sims_full_df - # stats comparing cos sim shifts of test perturbations vs null distribution def isp_stats_vs_null(cos_sims_df, dict_list, null_dict_list): cos_sims_full_df = cos_sims_df.copy() cos_sims_full_df["Test_avg_shift"] = np.zeros(cos_sims_df.shape[0], dtype=float) cos_sims_full_df["Null_avg_shift"] = np.zeros(cos_sims_df.shape[0], dtype=float) - cos_sims_full_df["Test_vs_null_avg_shift"] = np.zeros( - cos_sims_df.shape[0], dtype=float - ) + cos_sims_full_df["Test_vs_null_avg_shift"] = np.zeros(cos_sims_df.shape[0], dtype=float) cos_sims_full_df["Test_vs_null_pval"] = np.zeros(cos_sims_df.shape[0], dtype=float) cos_sims_full_df["Test_vs_null_FDR"] = np.zeros(cos_sims_df.shape[0], dtype=float) - 
cos_sims_full_df["N_Detections_test"] = np.zeros( - cos_sims_df.shape[0], dtype="uint32" - ) - cos_sims_full_df["N_Detections_null"] = np.zeros( - cos_sims_df.shape[0], dtype="uint32" - ) - + cos_sims_full_df["N_Detections_test"] = np.zeros(cos_sims_df.shape[0], dtype="uint32") + cos_sims_full_df["N_Detections_null"] = np.zeros(cos_sims_df.shape[0], dtype="uint32") + for i in trange(cos_sims_df.shape[0]): token = cos_sims_df["Gene"][i] test_shifts = [] null_shifts = [] - + for dict_i in dict_list: - test_shifts += dict_i.get((token, "cell_emb"), []) + test_shifts += dict_i.get((token, "cell_emb"),[]) for dict_i in null_dict_list: - null_shifts += dict_i.get((token, "cell_emb"), []) - + null_shifts += dict_i.get((token, "cell_emb"),[]) + cos_sims_full_df.loc[i, "Test_avg_shift"] = np.mean(test_shifts) cos_sims_full_df.loc[i, "Null_avg_shift"] = np.mean(null_shifts) - cos_sims_full_df.loc[i, "Test_vs_null_avg_shift"] = np.mean( - test_shifts - ) - np.mean(null_shifts) - cos_sims_full_df.loc[i, "Test_vs_null_pval"] = ranksums( - test_shifts, null_shifts, nan_policy="omit" - ).pvalue - # remove nan values - cos_sims_full_df.Test_vs_null_pval = np.where( - np.isnan(cos_sims_full_df.Test_vs_null_pval), - 1, - cos_sims_full_df.Test_vs_null_pval, - ) + cos_sims_full_df.loc[i, "Test_vs_null_avg_shift"] = np.mean(test_shifts)-np.mean(null_shifts) + cos_sims_full_df.loc[i, "Test_vs_null_pval"] = ranksums(test_shifts, + null_shifts, nan_policy="omit").pvalue + cos_sims_full_df.loc[i, "N_Detections_test"] = len(test_shifts) cos_sims_full_df.loc[i, "N_Detections_null"] = len(null_shifts) - cos_sims_full_df["Test_vs_null_FDR"] = get_fdr( - cos_sims_full_df["Test_vs_null_pval"] - ) - - cos_sims_full_df["Sig"] = [ - 1 if fdr < 0.05 else 0 for fdr in cos_sims_full_df["Test_vs_null_FDR"] - ] - cos_sims_full_df = cos_sims_full_df.sort_values( - by=["Sig", "Test_vs_null_avg_shift", "Test_vs_null_FDR"], - ascending=[False, False, True], - ) + cos_sims_full_df["Test_vs_null_FDR"] = 
get_fdr(cos_sims_full_df["Test_vs_null_pval"]) + + cos_sims_full_df["Sig"] = [1 if fdr<0.05 else 0 for fdr in cos_sims_full_df["Test_vs_null_FDR"]] + cos_sims_full_df = cos_sims_full_df.sort_values(by=["Sig", + "Test_vs_null_avg_shift", + "Test_vs_null_FDR"], + ascending=[False,False,True]) return cos_sims_full_df - # stats for identifying perturbations with largest effect within a given set of cells # fits a mixture model to 2 components (impact vs. non-impact) and # reports the most likely component for each test perturbation # Note: because assumes given perturbation has a consistent effect in the cells tested, # we recommend only using the mixture model strategy with uniform cell populations def isp_stats_mixture_model(cos_sims_df, dict_list, combos, anchor_token): - names = ["Gene", "Gene_name", "Ensembl_ID"] - + + names=["Gene", + "Gene_name", + "Ensembl_ID"] + if combos == 0: names += ["Test_avg_shift"] elif combos == 1: - names += [ - "Anchor_shift", - "Test_token_shift", - "Sum_of_indiv_shifts", - "Combo_shift", - "Combo_minus_sum_shift", - ] - - names += ["Impact_component", "Impact_component_percent"] + names += ["Anchor_shift", + "Test_token_shift", + "Sum_of_indiv_shifts", + "Combo_shift", + "Combo_minus_sum_shift"] + + names += ["Impact_component", + "Impact_component_percent"] cos_sims_full_df = pd.DataFrame(columns=names) avg_values = [] gene_names = [] - + for i in trange(cos_sims_df.shape[0]): token = cos_sims_df["Gene"][i] name = cos_sims_df["Gene_name"][i] ensembl_id = cos_sims_df["Ensembl_ID"][i] cos_shift_data = [] - + for dict_i in dict_list: if (combos == 0) and (anchor_token is not None): - cos_shift_data += dict_i.get((anchor_token, token), []) + cos_shift_data += dict_i.get((anchor_token, token),[]) else: - cos_shift_data += dict_i.get((token, "cell_emb"), []) - + cos_shift_data += dict_i.get((token, "cell_emb"),[]) + # Extract values for current gene if combos == 0: test_values = cos_shift_data elif combos == 1: test_values = [] for tup 
in cos_shift_data: - test_values.append(tup[2]) - + test_values.append(tup[2]) + if len(test_values) > 0: avg_value = np.mean(test_values) avg_values.append(avg_value) gene_names.append(name) - + # fit Gaussian mixture model to dataset of mean for each gene avg_values_to_fit = np.array(avg_values).reshape(-1, 1) gm = GaussianMixture(n_components=2, random_state=0).fit(avg_values_to_fit) - + for i in trange(cos_sims_df.shape[0]): token = cos_sims_df["Gene"][i] name = cos_sims_df["Gene_name"][i] @@ -583,101 +352,71 @@ def isp_stats_mixture_model(cos_sims_df, dict_list, combos, anchor_token): for dict_i in dict_list: if (combos == 0) and (anchor_token is not None): - cos_shift_data += dict_i.get((anchor_token, token), []) + cos_shift_data += dict_i.get((anchor_token, token),[]) else: - cos_shift_data += dict_i.get((token, "cell_emb"), []) - + cos_shift_data += dict_i.get((token, "cell_emb"),[]) + if combos == 0: mean_test = np.mean(cos_shift_data) - impact_components = [ - get_impact_component(value, gm) for value in cos_shift_data - ] + impact_components = [get_impact_component(value,gm) for value in cos_shift_data] elif combos == 1: - anchor_cos_sim_megalist = [ - anchor for anchor, token, combo in cos_shift_data - ] - token_cos_sim_megalist = [token for anchor, token, combo in cos_shift_data] - anchor_plus_token_cos_sim_megalist = [ - 1 - ((1 - anchor) + (1 - token)) - for anchor, token, combo in cos_shift_data - ] - combo_anchor_token_cos_sim_megalist = [ - combo for anchor, token, combo in cos_shift_data - ] - combo_minus_sum_cos_sim_megalist = [ - combo - (1 - ((1 - anchor) + (1 - token))) - for anchor, token, combo in cos_shift_data - ] + anchor_cos_sim_megalist = [anchor for anchor,token,combo in cos_shift_data] + token_cos_sim_megalist = [token for anchor,token,combo in cos_shift_data] + anchor_plus_token_cos_sim_megalist = [1-((1-anchor)+(1-token)) for anchor,token,combo in cos_shift_data] + combo_anchor_token_cos_sim_megalist = [combo for anchor,token,combo 
in cos_shift_data] + combo_minus_sum_cos_sim_megalist = [combo-(1-((1-anchor)+(1-token))) for anchor,token,combo in cos_shift_data] mean_anchor = np.mean(anchor_cos_sim_megalist) mean_token = np.mean(token_cos_sim_megalist) mean_sum = np.mean(anchor_plus_token_cos_sim_megalist) mean_test = np.mean(combo_anchor_token_cos_sim_megalist) mean_combo_minus_sum = np.mean(combo_minus_sum_cos_sim_megalist) - - impact_components = [ - get_impact_component(value, gm) - for value in combo_anchor_token_cos_sim_megalist - ] - - impact_component = get_impact_component(mean_test, gm) - impact_component_percent = np.mean(impact_components) * 100 - - data_i = [token, name, ensembl_id] + + impact_components = [get_impact_component(value,gm) for value in combo_anchor_token_cos_sim_megalist] + + impact_component = get_impact_component(mean_test,gm) + impact_component_percent = np.mean(impact_components)*100 + + data_i = [token, + name, + ensembl_id] if combos == 0: data_i += [mean_test] elif combos == 1: - data_i += [ - mean_anchor, - mean_token, - mean_sum, - mean_test, - mean_combo_minus_sum, - ] - data_i += [impact_component, impact_component_percent] - - cos_sims_df_i = pd.DataFrame(dict(zip(names, data_i)), index=[i]) - cos_sims_full_df = pd.concat([cos_sims_full_df, cos_sims_df_i]) - + data_i += [mean_anchor, + mean_token, + mean_sum, + mean_test, + mean_combo_minus_sum] + data_i += [impact_component, + impact_component_percent] + + cos_sims_df_i = pd.DataFrame(dict(zip(names,data_i)),index=[i]) + cos_sims_full_df = pd.concat([cos_sims_full_df,cos_sims_df_i]) + # quantify number of detections of each gene - if anchor_token is None: - cos_sims_full_df["N_Detections"] = [ - n_detections(i, dict_list, "cell", anchor_token) - for i in cos_sims_full_df["Gene"] - ] - else: - cos_sims_full_df["N_Detections"] = [ - n_detections(i, dict_list, "gene", anchor_token) - for i in cos_sims_full_df["Gene"] - ] - + cos_sims_full_df["N_Detections"] = [n_detections(i, + dict_list, + "gene", + 
anchor_token) for i in cos_sims_full_df["Gene"]] + if combos == 0: - cos_sims_full_df = cos_sims_full_df.sort_values( - by=["Impact_component", "Test_avg_shift"], ascending=[False, True] - ) + cos_sims_full_df = cos_sims_full_df.sort_values(by=["Impact_component", + "Test_avg_shift"], + ascending=[False,True]) elif combos == 1: - cos_sims_full_df = cos_sims_full_df.sort_values( - by=["Impact_component", "Combo_minus_sum_shift"], ascending=[False, True] - ) + cos_sims_full_df = cos_sims_full_df.sort_values(by=["Impact_component", + "Combo_minus_sum_shift"], + ascending=[False,True]) return cos_sims_full_df - class InSilicoPerturberStats: valid_option_dict = { - "mode": { - "goal_state_shift", - "vs_null", - "mixture_model", - "aggregate_data", - "aggregate_gene_shifts", - }, - "genes_perturbed": {"all", list}, - "combos": {0, 1}, + "mode": {"goal_state_shift","vs_null","mixture_model","aggregate_data"}, + "combos": {0,1}, "anchor_gene": {None, str}, "cell_states_to_model": {None, dict}, - "pickle_suffix": {None, str}, } - def __init__( self, mode="mixture_model", @@ -685,49 +424,47 @@ class InSilicoPerturberStats: combos=0, anchor_gene=None, cell_states_to_model=None, - pickle_suffix="_raw.pickle", token_dictionary_file=TOKEN_DICTIONARY_FILE, - gene_name_id_dictionary_file=ENSEMBL_DICTIONARY_FILE, + gene_name_id_dictionary_file=GENE_NAME_ID_DICTIONARY_FILE, ): """ Initialize in silico perturber stats generator. - **Parameters:** - - mode : {"goal_state_shift", "vs_null", "mixture_model", "aggregate_data", "aggregate_gene_shifts"} - | Type of stats. - | "goal_state_shift": perturbation vs. random for desired cell state shift - | "vs_null": perturbation vs. null from provided null distribution dataset - | "mixture_model": perturbation in impact vs. 
no impact component of mixture model (no goal direction) - | "aggregate_data": aggregates cosine shifts for single perturbation in multiple cells - | "aggregate_gene_shifts": aggregates cosine shifts of genes in response to perturbation(s) + Parameters + ---------- + mode : {"goal_state_shift","vs_null","mixture_model","aggregate_data"} + Type of stats. + "goal_state_shift": perturbation vs. random for desired cell state shift + "vs_null": perturbation vs. null from provided null distribution dataset + "mixture_model": perturbation in impact vs. no impact component of mixture model (no goal direction) + "aggregate_data": aggregates cosine shifts for single perturbation in multiple cells genes_perturbed : "all", list - | Genes perturbed in isp experiment. - | Default is assuming genes_to_perturb in isp experiment was "all" (each gene in each cell). - | Otherwise, may provide a list of ENSEMBL IDs of genes perturbed as a group all together. + Genes perturbed in isp experiment. + Default is assuming genes_to_perturb in isp experiment was "all" (each gene in each cell). + Otherwise, may provide a list of ENSEMBL IDs of genes perturbed as a group all together. combos : {0,1,2} - | Whether genex perturbed in isp experiment were perturbed individually (0), in pairs (1), or in triplets (2). + Whether to perturb genes individually (0), in pairs (1), or in triplets (2). anchor_gene : None, str - | ENSEMBL ID of gene to use as anchor in combination perturbations or in testing effect on downstream genes. - | For example, if combos=1 and anchor_gene="ENSG00000136574": - | analyzes data for anchor gene perturbed in combination with each other gene. - | However, if combos=0 and anchor_gene="ENSG00000136574": - | analyzes data for the effect of anchor gene's perturbation on the embedding of each other gene. + ENSEMBL ID of gene to use as anchor in combination perturbations or in testing effect on downstream genes. 
+ For example, if combos=1 and anchor_gene="ENSG00000136574": + analyzes data for anchor gene perturbed in combination with each other gene. + However, if combos=0 and anchor_gene="ENSG00000136574": + analyzes data for the effect of anchor gene's perturbation on the embedding of each other gene. cell_states_to_model: None, dict - | Cell states to model if testing perturbations that achieve goal state change. - | Four-item dictionary with keys: state_key, start_state, goal_state, and alt_states - | state_key: key specifying name of column in .dataset that defines the start/goal states - | start_state: value in the state_key column that specifies the start state - | goal_state: value in the state_key column taht specifies the goal end state - | alt_states: list of values in the state_key column that specify the alternate end states - | For example: {"state_key": "disease", - | "start_state": "dcm", - | "goal_state": "nf", - | "alt_states": ["hcm", "other1", "other2"]} + Cell states to model if testing perturbations that achieve goal state change. + Four-item dictionary with keys: state_key, start_state, goal_state, and alt_states + state_key: key specifying name of column in .dataset that defines the start/goal states + start_state: value in the state_key column that specifies the start state + goal_state: value in the state_key column taht specifies the goal end state + alt_states: list of values in the state_key column that specify the alternate end states + For example: {"state_key": "disease", + "start_state": "dcm", + "goal_state": "nf", + "alt_states": ["hcm", "other1", "other2"]} token_dictionary_file : Path - | Path to pickle file containing token dictionary (Ensembl ID:token). + Path to pickle file containing token dictionary (Ensembl ID:token). gene_name_id_dictionary_file : Path - | Path to pickle file containing gene name to ID dictionary (gene name:Ensembl ID). + Path to pickle file containing gene name to ID dictionary (gene name:Ensembl ID). 
""" self.mode = mode @@ -735,14 +472,13 @@ class InSilicoPerturberStats: self.combos = combos self.anchor_gene = anchor_gene self.cell_states_to_model = cell_states_to_model - self.pickle_suffix = pickle_suffix - + self.validate_options() # load token dictionary (Ensembl IDs:token) with open(token_dictionary_file, "rb") as f: self.gene_token_dict = pickle.load(f) - + # load gene name dictionary (gene name:Ensembl ID) with open(gene_name_id_dictionary_file, "rb") as f: self.gene_name_id_dict = pickle.load(f) @@ -753,7 +489,7 @@ class InSilicoPerturberStats: self.anchor_token = self.gene_token_dict[self.anchor_gene] def validate_options(self): - for attr_name, valid_options in self.valid_option_dict.items(): + for attr_name,valid_options in self.valid_option_dict.items(): attr_value = self.__dict__[attr_name] if type(attr_value) not in {list, dict}: if attr_name in {"anchor_gene"}: @@ -762,40 +498,35 @@ class InSilicoPerturberStats: continue valid_type = False for option in valid_options: - if (option in [str, int, list, dict]) and isinstance( - attr_value, option - ): + if (option in [int,list,dict]) and isinstance(attr_value, option): valid_type = True break - if not valid_type: - logger.error( - f"Invalid option for {attr_name}. " - f"Valid options for {attr_name}: {valid_options}" - ) - raise - + if valid_type: + continue + logger.error( + f"Invalid option for {attr_name}. " \ + f"Valid options for {attr_name}: {valid_options}" + ) + raise + if self.cell_states_to_model is not None: if len(self.cell_states_to_model.items()) == 1: logger.warning( - "The single value dictionary for cell_states_to_model will be " - "replaced with a dictionary with named keys for start, goal, and alternate states. " - "Please specify state_key, start_state, goal_state, and alt_states " - "in the cell_states_to_model dictionary for future use. 
" - "For example, cell_states_to_model={" - "'state_key': 'disease', " - "'start_state': 'dcm', " - "'goal_state': 'nf', " - "'alt_states': ['hcm', 'other1', 'other2']}" + "The single value dictionary for cell_states_to_model will be " \ + "replaced with a dictionary with named keys for start, goal, and alternate states. " \ + "Please specify state_key, start_state, goal_state, and alt_states " \ + "in the cell_states_to_model dictionary for future use. " \ + "For example, cell_states_to_model={" \ + "'state_key': 'disease', " \ + "'start_state': 'dcm', " \ + "'goal_state': 'nf', " \ + "'alt_states': ['hcm', 'other1', 'other2']}" ) - for key, value in self.cell_states_to_model.items(): + for key,value in self.cell_states_to_model.items(): if (len(value) == 3) and isinstance(value, tuple): - if ( - isinstance(value[0], list) - and isinstance(value[1], list) - and isinstance(value[2], list) - ): + if isinstance(value[0],list) and isinstance(value[1],list) and isinstance(value[2],list): if len(value[0]) == 1 and len(value[1]) == 1: - all_values = value[0] + value[1] + value[2] + all_values = value[0]+value[1]+value[2] if len(all_values) == len(set(all_values)): continue # reformat to the new named key format @@ -804,176 +535,136 @@ class InSilicoPerturberStats: "state_key": list(self.cell_states_to_model.keys())[0], "start_state": state_values[0][0], "goal_state": state_values[1][0], - "alt_states": state_values[2:][0], + "alt_states": state_values[2:][0] } - elif set(self.cell_states_to_model.keys()) == { - "state_key", - "start_state", - "goal_state", - "alt_states", - }: - if ( - (self.cell_states_to_model["state_key"] is None) - or (self.cell_states_to_model["start_state"] is None) - or (self.cell_states_to_model["goal_state"] is None) - ): + elif set(self.cell_states_to_model.keys()) == {"state_key", "start_state", "goal_state", "alt_states"}: + if (self.cell_states_to_model["state_key"] is None) \ + or (self.cell_states_to_model["start_state"] is None) \ + or 
(self.cell_states_to_model["goal_state"] is None): logger.error( - "Please specify 'state_key', 'start_state', and 'goal_state' in cell_states_to_model." - ) + "Please specify 'state_key', 'start_state', and 'goal_state' in cell_states_to_model.") raise - - if ( - self.cell_states_to_model["start_state"] - == self.cell_states_to_model["goal_state"] - ): - logger.error("All states must be unique.") + + if self.cell_states_to_model["start_state"] == self.cell_states_to_model["goal_state"]: + logger.error( + "All states must be unique.") raise if self.cell_states_to_model["alt_states"] is not None: - if not isinstance(self.cell_states_to_model["alt_states"], list): + if type(self.cell_states_to_model["alt_states"]) is not list: logger.error( "self.cell_states_to_model['alt_states'] must be a list (even if it is one element)." ) raise - if len(self.cell_states_to_model["alt_states"]) != len( - set(self.cell_states_to_model["alt_states"]) - ): - logger.error("All states must be unique.") + if len(self.cell_states_to_model["alt_states"])!= len(set(self.cell_states_to_model["alt_states"])): + logger.error( + "All states must be unique.") raise - elif set(self.cell_states_to_model.keys()) == { - "state_key", - "start_state", - "goal_state", - }: - self.cell_states_to_model["alt_states"] = [] else: logger.error( - "cell_states_to_model must only have the following four keys: " - "'state_key', 'start_state', 'goal_state', 'alt_states'." - "For example, cell_states_to_model={" - "'state_key': 'disease', " - "'start_state': 'dcm', " - "'goal_state': 'nf', " - "'alt_states': ['hcm', 'other1', 'other2']}" + "cell_states_to_model must only have the following four keys: " \ + "'state_key', 'start_state', 'goal_state', 'alt_states'." 
\ + "For example, cell_states_to_model={" \ + "'state_key': 'disease', " \ + "'start_state': 'dcm', " \ + "'goal_state': 'nf', " \ + "'alt_states': ['hcm', 'other1', 'other2']}" ) raise if self.anchor_gene is not None: self.anchor_gene = None logger.warning( - "anchor_gene set to None. " - "Currently, anchor gene not available " - "when modeling multiple cell states." - ) - + "anchor_gene set to None. " \ + "Currently, anchor gene not available " \ + "when modeling multiple cell states.") + if self.combos > 0: if self.anchor_gene is None: logger.error( - "Currently, stats are only supported for combination " - "in silico perturbation run with anchor gene. Please add " - "anchor gene when using with combos > 0. " - ) + "Currently, stats are only supported for combination " \ + "in silico perturbation run with anchor gene. Please add " \ + "anchor gene when using with combos > 0. ") raise - + if (self.mode == "mixture_model") and (self.genes_perturbed != "all"): logger.error( - "Mixture model mode requires multiple gene perturbations to fit model " - "so is incompatible with a single grouped perturbation." - ) + "Mixture model mode requires multiple gene perturbations to fit model " \ + "so is incompatible with a single grouped perturbation.") raise if (self.mode == "aggregate_data") and (self.genes_perturbed == "all"): logger.error( - "Simple data aggregation mode is for single perturbation in multiple cells " - "so is incompatible with a genes_perturbed being 'all'." - ) - raise - - def get_stats( - self, - input_data_directory, - null_dist_data_directory, - output_directory, - output_prefix, - null_dict_list=None, - ): + "Simple data aggregation mode is for single perturbation in multiple cells " \ + "so is incompatible with a genes_perturbed being 'all'.") + raise + + def get_stats(self, + input_data_directory, + null_dist_data_directory, + output_directory, + output_prefix): """ Get stats for in silico perturbation data and save as results in output_directory. 
- **Parameters:** - + Parameters + ---------- input_data_directory : Path - | Path to directory containing cos_sim dictionary inputs + Path to directory containing cos_sim dictionary inputs null_dist_data_directory : Path - | Path to directory containing null distribution cos_sim dictionary inputs + Path to directory containing null distribution cos_sim dictionary inputs output_directory : Path - | Path to directory where perturbation data will be saved as .csv + Path to directory where perturbation data will be saved as .csv output_prefix : str - | Prefix for output .csv - null_dict_list: list[dict] - | List of loaded null distribution dictionary if more than one comparison vs. the null is to be performed - - **Outputs:** - + Prefix for output .csv + + Outputs + ---------- Definition of possible columns in .csv output file. - - | Of note, not all columns will be present in all output files. - | Some columns are specific to particular perturbation modes. - - | "Gene": gene token - | "Gene_name": gene name - | "Ensembl_ID": gene Ensembl ID - | "N_Detections": number of cells in which each gene or gene combination was detected in the input dataset - | "Sig": 1 if FDR<0.05, otherwise 0 - - | "Shift_to_goal_end": cosine shift from start state towards goal end state in response to given perturbation - | "Shift_to_alt_end": cosine shift from start state towards alternate end state in response to given perturbation - | "Goal_end_vs_random_pval": pvalue of cosine shift from start state towards goal end state by Wilcoxon - | pvalue compares shift caused by perturbing given gene compared to random genes - | "Alt_end_vs_random_pval": pvalue of cosine shift from start state towards alternate end state by Wilcoxon - | pvalue compares shift caused by perturbing given gene compared to random genes - | "Goal_end_FDR": Benjamini-Hochberg correction of "Goal_end_vs_random_pval" - | "Alt_end_FDR": Benjamini-Hochberg correction of "Alt_end_vs_random_pval" - - | "Test_avg_shift": 
cosine shift in response to given perturbation in cells from test distribution - | "Null_avg_shift": cosine shift in response to given perturbation in cells from null distribution (e.g. random cells) - | "Test_vs_null_avg_shift": difference in cosine shift in cells from test vs. null distribution - | (i.e. "Test_avg_shift" minus "Null_avg_shift") - | "Test_vs_null_pval": pvalue of cosine shift in test vs. null distribution - | "Test_vs_null_FDR": Benjamini-Hochberg correction of "Test_vs_null_pval" - | "N_Detections_test": "N_Detections" in cells from test distribution - | "N_Detections_null": "N_Detections" in cells from null distribution - - | "Anchor_shift": cosine shift in response to given perturbation of anchor gene - | "Test_token_shift": cosine shift in response to given perturbation of test gene - | "Sum_of_indiv_shifts": sum of cosine shifts in response to individually perturbing test and anchor genes - | "Combo_shift": cosine shift in response to given perturbation of both anchor and test gene(s) in combination - | "Combo_minus_sum_shift": difference of cosine shifts in response combo perturbation vs. sum of individual perturbations - | (i.e. "Combo_shift" minus "Sum_of_indiv_shifts") - | "Impact_component": whether the given perturbation was modeled to be within the impact component by the mixture model - | 1: within impact component; 0: not within impact component - | "Impact_component_percent": percent of cells in which given perturbation was modeled to be within impact component - - | In case of aggregating data / gene shifts: - | "Perturbed": ID(s) of gene(s) being perturbed - | "Affected": ID of affected gene or "cell_emb" indicating the impact on the cell embedding as a whole - | "Cosine_sim_mean": mean of cosine similarity of cell or affected gene in original vs. perturbed - | "Cosine_sim_stdev": standard deviation of cosine similarity of cell or affected gene in original vs. 
perturbed + + Of note, not all columns will be present in all output files. + Some columns are specific to particular perturbation modes. + + "Gene": gene token + "Gene_name": gene name + "Ensembl_ID": gene Ensembl ID + "N_Detections": number of cells in which each gene or gene combination was detected in the input dataset + "Sig": 1 if FDR<0.05, otherwise 0 + + "Shift_to_goal_end": cosine shift from start state towards goal end state in response to given perturbation + "Shift_to_alt_end": cosine shift from start state towards alternate end state in response to given perturbation + "Goal_end_vs_random_pval": pvalue of cosine shift from start state towards goal end state by Wilcoxon + pvalue compares shift caused by perturbing given gene compared to random genes + "Alt_end_vs_random_pval": pvalue of cosine shift from start state towards alternate end state by Wilcoxon + pvalue compares shift caused by perturbing given gene compared to random genes + "Goal_end_FDR": Benjamini-Hochberg correction of "Goal_end_vs_random_pval" + "Alt_end_FDR": Benjamini-Hochberg correction of "Alt_end_vs_random_pval" + + "Test_avg_shift": cosine shift in response to given perturbation in cells from test distribution + "Null_avg_shift": cosine shift in response to given perturbation in cells from null distribution (e.g. random cells) + "Test_vs_null_avg_shift": difference in cosine shift in cells from test vs. null distribution + (i.e. "Test_avg_shift" minus "Null_avg_shift") + "Test_vs_null_pval": pvalue of cosine shift in test vs. 
null distribution + "Test_vs_null_FDR": Benjamini-Hochberg correction of "Test_vs_null_pval" + "N_Detections_test": "N_Detections" in cells from test distribution + "N_Detections_null": "N_Detections" in cells from null distribution + + "Anchor_shift": cosine shift in response to given perturbation of anchor gene + "Test_token_shift": cosine shift in response to given perturbation of test gene + "Sum_of_indiv_shifts": sum of cosine shifts in response to individually perturbing test and anchor genes + "Combo_shift": cosine shift in response to given perturbation of both anchor and test gene(s) in combination + "Combo_minus_sum_shift": difference of cosine shifts in response combo perturbation vs. sum of individual perturbations + (i.e. "Combo_shift" minus "Sum_of_indiv_shifts") + "Impact_component": whether the given perturbation was modeled to be within the impact component by the mixture model + 1: within impact component; 0: not within impact component + "Impact_component_percent": percent of cells in which given perturbation was modeled to be within impact component """ - if self.mode not in [ - "goal_state_shift", - "vs_null", - "mixture_model", - "aggregate_data", - "aggregate_gene_shifts", - ]: + if self.mode not in ["goal_state_shift", "vs_null", "mixture_model","aggregate_data"]: logger.error( - "Currently, only modes available are stats for goal_state_shift, " - "vs_null (comparing to null distribution), " - "mixture_model (fitting mixture model for perturbations with or without impact), " - "and aggregating data for single perturbations or for gene embedding shifts." 
- ) + "Currently, only modes available are stats for goal_state_shift, " \ + "vs_null (comparing to null distribution), and " \ + "mixture_model (fitting mixture model for perturbations with or without impact.") raise self.gene_token_id_dict = invert_dict(self.gene_token_dict) @@ -982,123 +673,44 @@ class InSilicoPerturberStats: # obtain total gene list if (self.combos == 0) and (self.anchor_token is not None): # cos sim data for effect of gene perturbation on the embedding of each other gene - dict_list = read_dictionaries( - input_data_directory, - "gene", - self.anchor_token, - self.cell_states_to_model, - self.pickle_suffix, - ) + dict_list = read_dictionaries(input_data_directory, "gene", self.anchor_token) gene_list = get_gene_list(dict_list, "gene") - elif ( - (self.combos == 0) - and (self.anchor_token is None) - and (self.mode == "aggregate_gene_shifts") - ): - dict_list = read_dictionaries( - input_data_directory, - "gene", - self.anchor_token, - self.cell_states_to_model, - self.pickle_suffix, - ) - gene_list = get_gene_list(dict_list, "cell") else: # cos sim data for effect of gene perturbation on the embedding of each cell - dict_list = read_dictionaries( - input_data_directory, - "cell", - self.anchor_token, - self.cell_states_to_model, - self.pickle_suffix, - ) + dict_list = read_dictionaries(input_data_directory, "cell", self.anchor_token) gene_list = get_gene_list(dict_list, "cell") - + # initiate results dataframe - cos_sims_df_initial = pd.DataFrame( - { - "Gene": gene_list, - "Gene_name": [self.token_to_gene_name(item) for item in gene_list], - "Ensembl_ID": [ - token_tuple_to_ensembl_ids(genes, self.gene_token_id_dict) - if self.genes_perturbed != "all" - else self.gene_token_id_dict[genes[1]] - if isinstance(genes, tuple) - else self.gene_token_id_dict[genes] - for genes in gene_list - ], - }, - index=[i for i in range(len(gene_list))], - ) + cos_sims_df_initial = pd.DataFrame({"Gene": gene_list, + "Gene_name": [self.token_to_gene_name(item) \ 
+ for item in gene_list], \ + "Ensembl_ID": [token_tuple_to_ensembl_ids(genes, self.gene_token_id_dict) \ + if self.genes_perturbed != "all" else \ + self.gene_token_id_dict[genes[1]] \ + if isinstance(genes,tuple) else \ + self.gene_token_id_dict[genes] \ + for genes in gene_list]}, \ + index=[i for i in range(len(gene_list))]) if self.mode == "goal_state_shift": - cos_sims_df = isp_stats_to_goal_state( - cos_sims_df_initial, - dict_list, - self.cell_states_to_model, - self.genes_perturbed, - ) - + cos_sims_df = isp_stats_to_goal_state(cos_sims_df_initial, dict_list, self.cell_states_to_model, self.genes_perturbed) + elif self.mode == "vs_null": - if null_dict_list is None: - null_dict_list = read_dictionaries( - null_dist_data_directory, - "cell", - self.anchor_token, - self.cell_states_to_model, - self.pickle_suffix, - ) - cos_sims_df = isp_stats_vs_null( - cos_sims_df_initial, dict_list, null_dict_list - ) + null_dict_list = read_dictionaries(null_dist_data_directory, "cell", self.anchor_token) + cos_sims_df = isp_stats_vs_null(cos_sims_df_initial, dict_list, null_dict_list) elif self.mode == "mixture_model": - cos_sims_df = isp_stats_mixture_model( - cos_sims_df_initial, dict_list, self.combos, self.anchor_token - ) - + cos_sims_df = isp_stats_mixture_model(cos_sims_df_initial, dict_list, self.combos, self.anchor_token) + elif self.mode == "aggregate_data": - cos_sims_df = isp_aggregate_grouped_perturb( - cos_sims_df_initial, dict_list, self.genes_perturbed - ) - - elif self.mode == "aggregate_gene_shifts": - if (self.genes_perturbed == "all") and (self.combos == 0): - tuple_types = [ - True if isinstance(genes, tuple) else False for genes in gene_list - ] - if all(tuple_types): - token_dtype = "tuple" - elif not any(tuple_types): - token_dtype = "nontuple" - else: - token_dtype = "mix" - else: - token_dtype = "mix" - - cos_sims_df = isp_aggregate_gene_shifts( - cos_sims_df_initial, - dict_list, - self.gene_token_id_dict, - self.gene_id_name_dict, - 
token_dtype, - ) + cos_sims_df = isp_aggregate_grouped_perturb(cos_sims_df_initial, dict_list) # save perturbation stats to output_path output_path = (Path(output_directory) / output_prefix).with_suffix(".csv") cos_sims_df.to_csv(output_path) def token_to_gene_name(self, item): - if np.issubdtype(type(item), np.integer): - return self.gene_id_name_dict.get( - self.gene_token_id_dict.get(item, np.nan), np.nan - ) - if isinstance(item, tuple): - return tuple( - [ - self.gene_id_name_dict.get( - self.gene_token_id_dict.get(i, np.nan), np.nan - ) - for i in item - ] - ) + if isinstance(item,int): + return self.gene_id_name_dict.get(self.gene_token_id_dict.get(item, np.nan), np.nan) + if isinstance(item,tuple): + return tuple([self.gene_id_name_dict.get(self.gene_token_id_dict.get(i, np.nan), np.nan) for i in item]) diff --git a/geneformer/mtl/__init__.py b/geneformer/mtl/__init__.py deleted file mode 100644 index 06788a56ac11397d1698a74381d466b7b7bd98b7..0000000000000000000000000000000000000000 --- a/geneformer/mtl/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# ruff: noqa: F401 \ No newline at end of file diff --git a/geneformer/mtl/collators.py b/geneformer/mtl/collators.py deleted file mode 100644 index 63546f93a05c857781198be88de027f5fb9e827f..0000000000000000000000000000000000000000 --- a/geneformer/mtl/collators.py +++ /dev/null @@ -1,76 +0,0 @@ -# imports -import torch -import pickle -from ..collator_for_classification import DataCollatorForGeneClassification -from .. 
import TOKEN_DICTIONARY_FILE - -"""Geneformer collator for multi-task cell classification.""" - -class DataCollatorForMultitaskCellClassification(DataCollatorForGeneClassification): - class_type = "cell" - - @staticmethod - def load_token_dictionary(): - with open(TOKEN_DICTIONARY_FILE, 'rb') as f: - return pickle.load(f) - - def __init__(self, *args, **kwargs) -> None: - # Load the token dictionary - token_dictionary = self.load_token_dictionary() - # Use the loaded token dictionary - super().__init__(token_dictionary=token_dictionary, *args, **kwargs) - - def _prepare_batch(self, features): - # Process inputs as usual - batch = self.tokenizer.pad( - features, - class_type=self.class_type, - padding=self.padding, - max_length=self.max_length, - pad_to_multiple_of=self.pad_to_multiple_of, - return_tensors="pt", - ) - - # Check if labels are present - if "label" in features[0]: - # Initialize labels dictionary for all tasks - labels = {task: [] for task in features[0]["label"].keys()} - # Populate labels for each task - for feature in features: - for task, label in feature["label"].items(): - labels[task].append(label) - - # Convert label lists to tensors, handling dictionaries appropriately - for task in labels: - if isinstance(labels[task][0], (list, torch.Tensor)): - dtype = torch.long - labels[task] = torch.tensor(labels[task], dtype=dtype) - elif isinstance(labels[task][0], dict): - # Handle dict specifically if needed - pass # Resolve nested data structure - - # Update the batch to include task-specific labels - batch["labels"] = labels - else: - # If no labels are present, create empty labels for all tasks - batch["labels"] = { - task: torch.tensor([], dtype=torch.long) - for task in features[0]["input_ids"].keys() - } - - return batch - - def __call__(self, features): - batch = self._prepare_batch(features) - for k, v in batch.items(): - if torch.is_tensor(v): - batch[k] = v.clone().detach() - elif isinstance(v, dict): - # Assuming nested structure needs 
conversion - batch[k] = { - task: torch.tensor(labels, dtype=torch.int64) - for task, labels in v.items() - } - else: - batch[k] = torch.tensor(v, dtype=torch.int64) - return batch \ No newline at end of file diff --git a/geneformer/mtl/data.py b/geneformer/mtl/data.py deleted file mode 100644 index 402ca952b5357932a6ff7cb9f5d0ec21551d44b8..0000000000000000000000000000000000000000 --- a/geneformer/mtl/data.py +++ /dev/null @@ -1,162 +0,0 @@ -import os -from .collators import DataCollatorForMultitaskCellClassification -from .imports import * - -def validate_columns(dataset, required_columns, dataset_type): - """Ensures required columns are present in the dataset.""" - missing_columns = [col for col in required_columns if col not in dataset.column_names] - if missing_columns: - raise KeyError( - f"Missing columns in {dataset_type} dataset: {missing_columns}. " - f"Available columns: {dataset.column_names}" - ) - - -def create_label_mappings(dataset, task_to_column): - """Creates label mappings for the dataset.""" - task_label_mappings = {} - num_labels_list = [] - for task, column in task_to_column.items(): - unique_values = sorted(set(dataset[column])) - mapping = {label: idx for idx, label in enumerate(unique_values)} - task_label_mappings[task] = mapping - num_labels_list.append(len(unique_values)) - return task_label_mappings, num_labels_list - - -def save_label_mappings(mappings, path): - """Saves label mappings to a pickle file.""" - with open(path, "wb") as f: - pickle.dump(mappings, f) - - -def load_label_mappings(path): - """Loads label mappings from a pickle file.""" - with open(path, "rb") as f: - return pickle.load(f) - - -def transform_dataset(dataset, task_to_column, task_label_mappings, config, is_test): - """Transforms the dataset to the required format.""" - transformed_dataset = [] - cell_id_mapping = {} - - for idx, record in enumerate(dataset): - transformed_record = { - "input_ids": torch.tensor(record["input_ids"], dtype=torch.long), - 
"cell_id": idx, # Index-based cell ID - } - - if not is_test: - label_dict = { - task: task_label_mappings[task][record[column]] - for task, column in task_to_column.items() - } - else: - label_dict = {task: -1 for task in config["task_names"]} - - transformed_record["label"] = label_dict - transformed_dataset.append(transformed_record) - cell_id_mapping[idx] = record.get("unique_cell_id", idx) - - return transformed_dataset, cell_id_mapping - - -def load_and_preprocess_data(dataset_path, config, is_test=False, dataset_type=""): - """Main function to load and preprocess data.""" - try: - dataset = load_from_disk(dataset_path) - - # Setup task and column mappings - task_names = [f"task{i+1}" for i in range(len(config["task_columns"]))] - task_to_column = dict(zip(task_names, config["task_columns"])) - config["task_names"] = task_names - - label_mappings_path = os.path.join( - config["results_dir"], - f"task_label_mappings{'_val' if dataset_type == 'validation' else ''}.pkl" - ) - - if not is_test: - validate_columns(dataset, task_to_column.values(), dataset_type) - - # Create and save label mappings - task_label_mappings, num_labels_list = create_label_mappings(dataset, task_to_column) - save_label_mappings(task_label_mappings, label_mappings_path) - else: - # Load existing mappings for test data - task_label_mappings = load_label_mappings(label_mappings_path) - num_labels_list = [len(mapping) for mapping in task_label_mappings.values()] - - # Transform dataset - transformed_dataset, cell_id_mapping = transform_dataset( - dataset, task_to_column, task_label_mappings, config, is_test - ) - - return transformed_dataset, cell_id_mapping, num_labels_list - - except KeyError as e: - raise ValueError(f"Configuration error or dataset key missing: {e}") - except Exception as e: - raise RuntimeError(f"Error during data loading or preprocessing: {e}") - - -def preload_and_process_data(config): - """Preloads and preprocesses train and validation datasets.""" - # Process train 
data and save mappings - train_data = load_and_preprocess_data(config["train_path"], config, dataset_type="train") - - # Process validation data and save mappings - val_data = load_and_preprocess_data(config["val_path"], config, dataset_type="validation") - - # Validate that the mappings match - validate_label_mappings(config) - - return (*train_data, *val_data[:2]) # Return train and val data along with mappings - - -def validate_label_mappings(config): - """Ensures train and validation label mappings are consistent.""" - train_mappings_path = os.path.join(config["results_dir"], "task_label_mappings.pkl") - val_mappings_path = os.path.join(config["results_dir"], "task_label_mappings_val.pkl") - train_mappings = load_label_mappings(train_mappings_path) - val_mappings = load_label_mappings(val_mappings_path) - - for task_name in config["task_names"]: - if train_mappings[task_name] != val_mappings[task_name]: - raise ValueError( - f"Mismatch in label mappings for task '{task_name}'.\n" - f"Train Mapping: {train_mappings[task_name]}\n" - f"Validation Mapping: {val_mappings[task_name]}" - ) - - -def get_data_loader(preprocessed_dataset, batch_size): - """Creates a DataLoader with optimal settings.""" - return DataLoader( - preprocessed_dataset, - batch_size=batch_size, - shuffle=True, - collate_fn=DataCollatorForMultitaskCellClassification(), - num_workers=os.cpu_count(), - pin_memory=True, - ) - - -def preload_data(config): - """Preprocesses train and validation data for trials.""" - train_loader = get_data_loader(*preload_and_process_data(config)[:2], config["batch_size"]) - val_loader = get_data_loader(*preload_and_process_data(config)[2:4], config["batch_size"]) - return train_loader, val_loader - - -def load_and_preprocess_test_data(config): - """Loads and preprocesses test data.""" - return load_and_preprocess_data(config["test_path"], config, is_test=True) - - -def prepare_test_loader(config): - """Prepares DataLoader for test data.""" - test_dataset, 
cell_id_mapping, num_labels_list = load_and_preprocess_test_data(config) - test_loader = get_data_loader(test_dataset, config["batch_size"]) - return test_loader, cell_id_mapping, num_labels_list diff --git a/geneformer/mtl/eval_utils.py b/geneformer/mtl/eval_utils.py deleted file mode 100644 index 0a8ea4babe4ab1e48cc56280ee03423075cf7563..0000000000000000000000000000000000000000 --- a/geneformer/mtl/eval_utils.py +++ /dev/null @@ -1,88 +0,0 @@ -import pandas as pd - -from .imports import * # noqa # isort:skip -from .data import prepare_test_loader # noqa # isort:skip -from .model import GeneformerMultiTask - - -def evaluate_test_dataset(model, device, test_loader, cell_id_mapping, config): - task_pred_labels = {task_name: [] for task_name in config["task_names"]} - task_pred_probs = {task_name: [] for task_name in config["task_names"]} - cell_ids = [] - - # # Load task label mappings from pickle file - # with open(f"{config['results_dir']}/task_label_mappings.pkl", "rb") as f: - # task_label_mappings = pickle.load(f) - - model.eval() - with torch.no_grad(): - for batch in test_loader: - input_ids = batch["input_ids"].to(device) - attention_mask = batch["attention_mask"].to(device) - _, logits, _ = model(input_ids, attention_mask) - for sample_idx in range(len(batch["input_ids"])): - cell_id = cell_id_mapping[batch["cell_id"][sample_idx].item()] - cell_ids.append(cell_id) - for i, task_name in enumerate(config["task_names"]): - pred_label = torch.argmax(logits[i][sample_idx], dim=-1).item() - pred_prob = ( - torch.softmax(logits[i][sample_idx], dim=-1).cpu().numpy() - ) - task_pred_labels[task_name].append(pred_label) - task_pred_probs[task_name].append(pred_prob) - - # Save test predictions with cell IDs and probabilities to CSV - test_results_dir = config["results_dir"] - os.makedirs(test_results_dir, exist_ok=True) - test_preds_file = os.path.join(test_results_dir, "test_preds.csv") - - rows = [] - for sample_idx in range(len(cell_ids)): - row = {"Cell ID": 
cell_ids[sample_idx]} - for task_name in config["task_names"]: - row[f"{task_name} Prediction"] = task_pred_labels[task_name][sample_idx] - row[f"{task_name} Probabilities"] = ",".join( - map(str, task_pred_probs[task_name][sample_idx]) - ) - rows.append(row) - - df = pd.DataFrame(rows) - df.to_csv(test_preds_file, index=False) - print(f"Test predictions saved to {test_preds_file}") - - -def load_and_evaluate_test_model(config): - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - test_loader, cell_id_mapping, num_labels_list = prepare_test_loader(config) - model_directory = os.path.join(config["model_save_path"], "GeneformerMultiTask") - hyperparams_path = os.path.join(model_directory, "hyperparameters.json") - - # Load the saved best hyperparameters - with open(hyperparams_path, "r") as f: - best_hyperparams = json.load(f) - - # Extract the task weights if present, otherwise set to None - task_weights = best_hyperparams.get("task_weights", None) - normalized_task_weights = task_weights if task_weights else [] - - # Print the loaded hyperparameters - print("Loaded hyperparameters:") - for param, value in best_hyperparams.items(): - if param == "task_weights": - print(f"normalized_task_weights: {value}") - else: - print(f"{param}: {value}") - - best_model_path = os.path.join(model_directory, "pytorch_model.bin") - best_model = GeneformerMultiTask( - config["pretrained_path"], - num_labels_list, - dropout_rate=best_hyperparams["dropout_rate"], - use_task_weights=config["use_task_weights"], - task_weights=normalized_task_weights, - ) - best_model.load_state_dict(torch.load(best_model_path)) - best_model.to(device) - - evaluate_test_dataset(best_model, device, test_loader, cell_id_mapping, config) - print("Evaluation completed.") diff --git a/geneformer/mtl/imports.py b/geneformer/mtl/imports.py deleted file mode 100644 index 4fe9e90945a10a3d79cc487fa15431f2915e5683..0000000000000000000000000000000000000000 --- a/geneformer/mtl/imports.py +++ 
/dev/null @@ -1,43 +0,0 @@ -import functools -import gc -import json -import os -import pickle -import sys -import warnings -from enum import Enum -from itertools import chain -from typing import Dict, List, Optional, Union - -import numpy as np -import optuna -import pandas as pd -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -from datasets import load_from_disk -from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, roc_curve -from sklearn.model_selection import train_test_split -from sklearn.preprocessing import LabelEncoder -from torch.utils.data import DataLoader -from transformers import ( - AdamW, - BatchEncoding, - BertConfig, - BertModel, - DataCollatorForTokenClassification, - SpecialTokensMixin, - get_cosine_schedule_with_warmup, - get_linear_schedule_with_warmup, - get_scheduler, -) -from transformers.utils import logging, to_py_obj - -from .collators import DataCollatorForMultitaskCellClassification - -# local modules -from .data import get_data_loader, preload_and_process_data -from .model import GeneformerMultiTask -from .optuna_utils import create_optuna_study -from .utils import save_model diff --git a/geneformer/mtl/model.py b/geneformer/mtl/model.py deleted file mode 100644 index 393ebfad4f44f98d748845ea1ae81d66139988f5..0000000000000000000000000000000000000000 --- a/geneformer/mtl/model.py +++ /dev/null @@ -1,121 +0,0 @@ -import torch -import torch.nn as nn -from transformers import BertConfig, BertModel - - -class AttentionPool(nn.Module): - """Attention-based pooling layer.""" - - def __init__(self, hidden_size): - super(AttentionPool, self).__init__() - self.attention_weights = nn.Parameter(torch.randn(hidden_size, 1)) - nn.init.xavier_uniform_( - self.attention_weights - ) # https://pytorch.org/docs/stable/nn.init.html - - def forward(self, hidden_states): - attention_scores = torch.matmul(hidden_states, self.attention_weights) - attention_scores = 
torch.softmax(attention_scores, dim=1) - pooled_output = torch.sum(hidden_states * attention_scores, dim=1) - return pooled_output - - -class GeneformerMultiTask(nn.Module): - def __init__( - self, - pretrained_path, - num_labels_list, - dropout_rate=0.1, - use_task_weights=False, - task_weights=None, - max_layers_to_freeze=0, - use_attention_pooling=False, - ): - super(GeneformerMultiTask, self).__init__() - self.config = BertConfig.from_pretrained(pretrained_path) - self.bert = BertModel(self.config) - self.num_labels_list = num_labels_list - self.use_task_weights = use_task_weights - self.dropout = nn.Dropout(dropout_rate) - self.use_attention_pooling = use_attention_pooling - - if use_task_weights and ( - task_weights is None or len(task_weights) != len(num_labels_list) - ): - raise ValueError( - "Task weights must be defined and match the number of tasks when 'use_task_weights' is True." - ) - self.task_weights = ( - task_weights if use_task_weights else [1.0] * len(num_labels_list) - ) - - # Freeze the specified initial layers - for layer in self.bert.encoder.layer[:max_layers_to_freeze]: - for param in layer.parameters(): - param.requires_grad = False - - self.attention_pool = ( - AttentionPool(self.config.hidden_size) if use_attention_pooling else None - ) - - self.classification_heads = nn.ModuleList( - [ - nn.Linear(self.config.hidden_size, num_labels) - for num_labels in num_labels_list - ] - ) - # initialization of the classification heads: https://pytorch.org/docs/stable/nn.init.html - for head in self.classification_heads: - nn.init.xavier_uniform_(head.weight) - nn.init.zeros_(head.bias) - - def forward(self, input_ids, attention_mask, labels=None): - try: - outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask) - except Exception as e: - raise RuntimeError(f"Error during BERT forward pass: {e}") - - sequence_output = outputs.last_hidden_state - - try: - pooled_output = ( - self.attention_pool(sequence_output) - if 
self.use_attention_pooling - else sequence_output[:, 0, :] - ) - pooled_output = self.dropout(pooled_output) - except Exception as e: - raise RuntimeError(f"Error during pooling and dropout: {e}") - - total_loss = 0 - logits = [] - losses = [] - - for task_id, (head, num_labels) in enumerate( - zip(self.classification_heads, self.num_labels_list) - ): - try: - task_logits = head(pooled_output) - except Exception as e: - raise RuntimeError( - f"Error during forward pass of classification head {task_id}: {e}" - ) - - logits.append(task_logits) - - if labels is not None: - try: - loss_fct = nn.CrossEntropyLoss() - task_loss = loss_fct( - task_logits.view(-1, num_labels), labels[task_id].view(-1) - ) - if self.use_task_weights: - task_loss *= self.task_weights[task_id] - total_loss += task_loss - losses.append(task_loss.item()) - except Exception as e: - raise RuntimeError( - f"Error during loss computation for task {task_id}: {e}" - ) - - return total_loss, logits, losses if labels is not None else logits diff --git a/geneformer/mtl/optuna_utils.py b/geneformer/mtl/optuna_utils.py deleted file mode 100644 index 47f375e90f4030e15feb7bc1245ffbba3e6a086e..0000000000000000000000000000000000000000 --- a/geneformer/mtl/optuna_utils.py +++ /dev/null @@ -1,27 +0,0 @@ -import optuna -from optuna.integration import TensorBoardCallback - - -def save_trial_callback(study, trial, trials_result_path): - with open(trials_result_path, "a") as f: - f.write( - f"Trial {trial.number}: Value (F1 Macro): {trial.value}, Params: {trial.params}\n" - ) - - -def create_optuna_study(objective, n_trials, trials_result_path, tensorboard_log_dir): - study = optuna.create_study(direction="maximize") - - # init TensorBoard callback - tensorboard_callback = TensorBoardCallback( - dirname=tensorboard_log_dir, metric_name="F1 Macro" - ) - - # callback and TensorBoard callback - callbacks = [ - lambda study, trial: save_trial_callback(study, trial, trials_result_path), - tensorboard_callback, - ] - - 
study.optimize(objective, n_trials=n_trials, callbacks=callbacks) - return study diff --git a/geneformer/mtl/train.py b/geneformer/mtl/train.py deleted file mode 100644 index 5dee1fb8baf594fb137dce3802a44cc0118f1558..0000000000000000000000000000000000000000 --- a/geneformer/mtl/train.py +++ /dev/null @@ -1,380 +0,0 @@ -import os -import random - -import numpy as np -import pandas as pd -import torch -from torch.utils.tensorboard import SummaryWriter -from tqdm import tqdm - -from .imports import * -from .model import GeneformerMultiTask -from .utils import calculate_task_specific_metrics, get_layer_freeze_range - - -def set_seed(seed): - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - -def initialize_wandb(config): - if config.get("use_wandb", False): - import wandb - - wandb.init(project=config["wandb_project"], config=config) - print("Weights & Biases (wandb) initialized and will be used for logging.") - else: - print( - "Weights & Biases (wandb) is not enabled. Logging will use other methods." 
- ) - - -def create_model(config, num_labels_list, device): - model = GeneformerMultiTask( - config["pretrained_path"], - num_labels_list, - dropout_rate=config["dropout_rate"], - use_task_weights=config["use_task_weights"], - task_weights=config["task_weights"], - max_layers_to_freeze=config["max_layers_to_freeze"], - use_attention_pooling=config["use_attention_pooling"], - ) - if config["use_data_parallel"]: - model = nn.DataParallel(model) - return model.to(device) - - -def setup_optimizer_and_scheduler(model, config, total_steps): - optimizer = AdamW( - model.parameters(), - lr=config["learning_rate"], - weight_decay=config["weight_decay"], - ) - warmup_steps = int(config["warmup_ratio"] * total_steps) - - if config["lr_scheduler_type"] == "linear": - scheduler = get_linear_schedule_with_warmup( - optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps - ) - elif config["lr_scheduler_type"] == "cosine": - scheduler = get_cosine_schedule_with_warmup( - optimizer, - num_warmup_steps=warmup_steps, - num_training_steps=total_steps, - num_cycles=0.5, - ) - - return optimizer, scheduler - - -def train_epoch( - model, train_loader, optimizer, scheduler, device, config, writer, epoch -): - model.train() - progress_bar = tqdm(train_loader, desc=f"Epoch {epoch+1}/{config['epochs']}") - for batch_idx, batch in enumerate(progress_bar): - optimizer.zero_grad() - input_ids = batch["input_ids"].to(device) - attention_mask = batch["attention_mask"].to(device) - labels = [ - batch["labels"][task_name].to(device) for task_name in config["task_names"] - ] - - loss, _, _ = model(input_ids, attention_mask, labels) - loss.backward() - - if config["gradient_clipping"]: - torch.nn.utils.clip_grad_norm_(model.parameters(), config["max_grad_norm"]) - - optimizer.step() - scheduler.step() - - writer.add_scalar( - "Training Loss", loss.item(), epoch * len(train_loader) + batch_idx - ) - if config.get("use_wandb", False): - import wandb - - wandb.log({"Training Loss": 
loss.item()}) - - # Update progress bar - progress_bar.set_postfix({"loss": f"{loss.item():.4f}"}) - - return loss.item() # Return the last batch loss - - -def validate_model(model, val_loader, device, config): - model.eval() - val_loss = 0.0 - task_true_labels = {task_name: [] for task_name in config["task_names"]} - task_pred_labels = {task_name: [] for task_name in config["task_names"]} - task_pred_probs = {task_name: [] for task_name in config["task_names"]} - - with torch.no_grad(): - for batch in val_loader: - input_ids = batch["input_ids"].to(device) - attention_mask = batch["attention_mask"].to(device) - labels = [ - batch["labels"][task_name].to(device) - for task_name in config["task_names"] - ] - loss, logits, _ = model(input_ids, attention_mask, labels) - val_loss += loss.item() - - for sample_idx in range(len(batch["input_ids"])): - for i, task_name in enumerate(config["task_names"]): - true_label = batch["labels"][task_name][sample_idx].item() - pred_label = torch.argmax(logits[i][sample_idx], dim=-1).item() - pred_prob = ( - torch.softmax(logits[i][sample_idx], dim=-1).cpu().numpy() - ) - task_true_labels[task_name].append(true_label) - task_pred_labels[task_name].append(pred_label) - task_pred_probs[task_name].append(pred_prob) - - val_loss /= len(val_loader) - return val_loss, task_true_labels, task_pred_labels, task_pred_probs - - -def log_metrics(task_metrics, val_loss, config, writer, epochs): - for task_name, metrics in task_metrics.items(): - print( - f"{task_name} - Validation F1 Macro: {metrics['f1']:.4f}, Validation Accuracy: {metrics['accuracy']:.4f}" - ) - if config.get("use_wandb", False): - import wandb - - wandb.log( - { - f"{task_name} Validation F1 Macro": metrics["f1"], - f"{task_name} Validation Accuracy": metrics["accuracy"], - } - ) - - writer.add_scalar("Validation Loss", val_loss, epochs) - for task_name, metrics in task_metrics.items(): - writer.add_scalar(f"{task_name} - Validation F1 Macro", metrics["f1"], epochs) - 
writer.add_scalar( - f"{task_name} - Validation Accuracy", metrics["accuracy"], epochs - ) - - -def save_validation_predictions( - val_cell_id_mapping, - task_true_labels, - task_pred_labels, - task_pred_probs, - config, - trial_number=None, -): - if trial_number is not None: - trial_results_dir = os.path.join(config["results_dir"], f"trial_{trial_number}") - os.makedirs(trial_results_dir, exist_ok=True) - val_preds_file = os.path.join(trial_results_dir, "val_preds.csv") - else: - val_preds_file = os.path.join(config["results_dir"], "manual_run_val_preds.csv") - - rows = [] - for sample_idx in range(len(val_cell_id_mapping)): - row = {"Cell ID": val_cell_id_mapping[sample_idx]} - for task_name in config["task_names"]: - row[f"{task_name} True"] = task_true_labels[task_name][sample_idx] - row[f"{task_name} Pred"] = task_pred_labels[task_name][sample_idx] - row[f"{task_name} Probabilities"] = ",".join( - map(str, task_pred_probs[task_name][sample_idx]) - ) - rows.append(row) - - df = pd.DataFrame(rows) - df.to_csv(val_preds_file, index=False) - print(f"Validation predictions saved to {val_preds_file}") - - -def train_model( - config, - device, - train_loader, - val_loader, - train_cell_id_mapping, - val_cell_id_mapping, - num_labels_list, -): - set_seed(config["seed"]) - initialize_wandb(config) - - model = create_model(config, num_labels_list, device) - total_steps = len(train_loader) * config["epochs"] - optimizer, scheduler = setup_optimizer_and_scheduler(model, config, total_steps) - - log_dir = os.path.join(config["tensorboard_log_dir"], "manual_run") - writer = SummaryWriter(log_dir=log_dir) - - epoch_progress = tqdm(range(config["epochs"]), desc="Training Progress") - for epoch in epoch_progress: - last_loss = train_epoch( - model, train_loader, optimizer, scheduler, device, config, writer, epoch - ) - epoch_progress.set_postfix({"last_loss": f"{last_loss:.4f}"}) - - val_loss, task_true_labels, task_pred_labels, task_pred_probs = validate_model( - model, 
val_loader, device, config - ) - task_metrics = calculate_task_specific_metrics(task_true_labels, task_pred_labels) - - log_metrics(task_metrics, val_loss, config, writer, config["epochs"]) - writer.close() - - save_validation_predictions( - val_cell_id_mapping, task_true_labels, task_pred_labels, task_pred_probs, config - ) - - if config.get("use_wandb", False): - import wandb - - wandb.finish() - - print(f"\nFinal Validation Loss: {val_loss:.4f}") - return val_loss, model # Return both the validation loss and the trained model - - -def objective( - trial, - train_loader, - val_loader, - train_cell_id_mapping, - val_cell_id_mapping, - num_labels_list, - config, - device, -): - set_seed(config["seed"]) # Set the seed before each trial - initialize_wandb(config) - - # Hyperparameters - config["learning_rate"] = trial.suggest_float( - "learning_rate", - config["hyperparameters"]["learning_rate"]["low"], - config["hyperparameters"]["learning_rate"]["high"], - log=config["hyperparameters"]["learning_rate"]["log"], - ) - config["warmup_ratio"] = trial.suggest_float( - "warmup_ratio", - config["hyperparameters"]["warmup_ratio"]["low"], - config["hyperparameters"]["warmup_ratio"]["high"], - ) - config["weight_decay"] = trial.suggest_float( - "weight_decay", - config["hyperparameters"]["weight_decay"]["low"], - config["hyperparameters"]["weight_decay"]["high"], - ) - config["dropout_rate"] = trial.suggest_float( - "dropout_rate", - config["hyperparameters"]["dropout_rate"]["low"], - config["hyperparameters"]["dropout_rate"]["high"], - ) - config["lr_scheduler_type"] = trial.suggest_categorical( - "lr_scheduler_type", config["hyperparameters"]["lr_scheduler_type"]["choices"] - ) - config["use_attention_pooling"] = trial.suggest_categorical( - "use_attention_pooling", [False] - ) - - if config["use_task_weights"]: - config["task_weights"] = [ - trial.suggest_float( - f"task_weight_{i}", - config["hyperparameters"]["task_weights"]["low"], - 
config["hyperparameters"]["task_weights"]["high"], - ) - for i in range(len(num_labels_list)) - ] - weight_sum = sum(config["task_weights"]) - config["task_weights"] = [ - weight / weight_sum for weight in config["task_weights"] - ] - else: - config["task_weights"] = None - - # Dynamic range for max_layers_to_freeze - freeze_range = get_layer_freeze_range(config["pretrained_path"]) - config["max_layers_to_freeze"] = trial.suggest_int( - "max_layers_to_freeze", - freeze_range["min"], - freeze_range["max"] - ) - - model = create_model(config, num_labels_list, device) - total_steps = len(train_loader) * config["epochs"] - optimizer, scheduler = setup_optimizer_and_scheduler(model, config, total_steps) - - log_dir = os.path.join(config["tensorboard_log_dir"], f"trial_{trial.number}") - writer = SummaryWriter(log_dir=log_dir) - - for epoch in range(config["epochs"]): - train_epoch( - model, train_loader, optimizer, scheduler, device, config, writer, epoch - ) - - val_loss, task_true_labels, task_pred_labels, task_pred_probs = validate_model( - model, val_loader, device, config - ) - task_metrics = calculate_task_specific_metrics(task_true_labels, task_pred_labels) - - log_metrics(task_metrics, val_loss, config, writer, config["epochs"]) - writer.close() - - save_validation_predictions( - val_cell_id_mapping, - task_true_labels, - task_pred_labels, - task_pred_probs, - config, - trial.number, - ) - - trial.set_user_attr("model_state_dict", model.state_dict()) - trial.set_user_attr("task_weights", config["task_weights"]) - - trial.report(val_loss, config["epochs"]) - - if trial.should_prune(): - raise optuna.TrialPruned() - - if config.get("use_wandb", False): - import wandb - - wandb.log( - { - "trial_number": trial.number, - "val_loss": val_loss, - **{ - f"{task_name}_f1": metrics["f1"] - for task_name, metrics in task_metrics.items() - }, - **{ - f"{task_name}_accuracy": metrics["accuracy"] - for task_name, metrics in task_metrics.items() - }, - **{ - k: v - for k, v 
in config.items() - if k - in [ - "learning_rate", - "warmup_ratio", - "weight_decay", - "dropout_rate", - "lr_scheduler_type", - "use_attention_pooling", - "max_layers_to_freeze", - ] - }, - } - ) - wandb.finish() - - return val_loss diff --git a/geneformer/mtl/train_utils.py b/geneformer/mtl/train_utils.py deleted file mode 100644 index 430994a37a53dcde99666a7b5a4d99532e9bc8ba..0000000000000000000000000000000000000000 --- a/geneformer/mtl/train_utils.py +++ /dev/null @@ -1,161 +0,0 @@ -import random - -from .data import get_data_loader, preload_and_process_data -from .imports import * -from .model import GeneformerMultiTask -from .train import objective, train_model -from .utils import save_model - - -def set_seed(seed): - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed_all(seed) - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - - -def run_manual_tuning(config): - # Set seed for reproducibility - set_seed(config["seed"]) - - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - ( - train_dataset, - train_cell_id_mapping, - val_dataset, - val_cell_id_mapping, - num_labels_list, - ) = preload_and_process_data(config) - train_loader = get_data_loader(train_dataset, config["batch_size"]) - val_loader = get_data_loader(val_dataset, config["batch_size"]) - - # Print the manual hyperparameters being used - print("\nManual hyperparameters being used:") - for key, value in config["manual_hyperparameters"].items(): - print(f"{key}: {value}") - print() # Add an empty line for better readability - - # Use the manual hyperparameters - for key, value in config["manual_hyperparameters"].items(): - config[key] = value - - # Train the model - val_loss, trained_model = train_model( - config, - device, - train_loader, - val_loader, - train_cell_id_mapping, - val_cell_id_mapping, - num_labels_list, - ) - - print(f"\nValidation loss with manual hyperparameters: {val_loss}") - - # Save 
the trained model - model_save_directory = os.path.join( - config["model_save_path"], "GeneformerMultiTask" - ) - save_model(trained_model, model_save_directory) - - # Save the hyperparameters - hyperparams_to_save = { - **config["manual_hyperparameters"], - "dropout_rate": config["dropout_rate"], - "use_task_weights": config["use_task_weights"], - "task_weights": config["task_weights"], - "max_layers_to_freeze": config["max_layers_to_freeze"], - "use_attention_pooling": config["use_attention_pooling"], - } - hyperparams_path = os.path.join(model_save_directory, "hyperparameters.json") - with open(hyperparams_path, "w") as f: - json.dump(hyperparams_to_save, f) - print(f"Manual hyperparameters saved to {hyperparams_path}") - - return val_loss - - -def run_optuna_study(config): - # Set seed for reproducibility - set_seed(config["seed"]) - - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - ( - train_dataset, - train_cell_id_mapping, - val_dataset, - val_cell_id_mapping, - num_labels_list, - ) = preload_and_process_data(config) - train_loader = get_data_loader(train_dataset, config["batch_size"]) - val_loader = get_data_loader(val_dataset, config["batch_size"]) - - if config["use_manual_hyperparameters"]: - train_model( - config, - device, - train_loader, - val_loader, - train_cell_id_mapping, - val_cell_id_mapping, - num_labels_list, - ) - else: - objective_with_config_and_data = functools.partial( - objective, - train_loader=train_loader, - val_loader=val_loader, - train_cell_id_mapping=train_cell_id_mapping, - val_cell_id_mapping=val_cell_id_mapping, - num_labels_list=num_labels_list, - config=config, - device=device, - ) - - study = optuna.create_study( - direction="minimize", # Minimize validation loss - study_name=config["study_name"], - # storage=config["storage"], - load_if_exists=True, - ) - - study.optimize(objective_with_config_and_data, n_trials=config["n_trials"]) - - # After finding the best trial - best_params = 
study.best_trial.params - best_task_weights = study.best_trial.user_attrs["task_weights"] - print("Saving the best model and its hyperparameters...") - - # Saving model as before - best_model = GeneformerMultiTask( - config["pretrained_path"], - num_labels_list, - dropout_rate=best_params["dropout_rate"], - use_task_weights=config["use_task_weights"], - task_weights=best_task_weights, - ) - - # Get the best model state dictionary - best_model_state_dict = study.best_trial.user_attrs["model_state_dict"] - - # Remove the "module." prefix from the state dictionary keys if present - best_model_state_dict = { - k.replace("module.", ""): v for k, v in best_model_state_dict.items() - } - - # Load the modified state dictionary into the model, skipping unexpected keys - best_model.load_state_dict(best_model_state_dict, strict=False) - - model_save_directory = os.path.join( - config["model_save_path"], "GeneformerMultiTask" - ) - save_model(best_model, model_save_directory) - - # Additionally, save the best hyperparameters and task weights - hyperparams_path = os.path.join(model_save_directory, "hyperparameters.json") - - with open(hyperparams_path, "w") as f: - json.dump({**best_params, "task_weights": best_task_weights}, f) - print(f"Best hyperparameters and task weights saved to {hyperparams_path}") diff --git a/geneformer/mtl/utils.py b/geneformer/mtl/utils.py deleted file mode 100644 index 5de5079ffdefb853a183038a6b3956de42f19978..0000000000000000000000000000000000000000 --- a/geneformer/mtl/utils.py +++ /dev/null @@ -1,129 +0,0 @@ -import os -import shutil - -from sklearn.metrics import accuracy_score, f1_score -from sklearn.preprocessing import LabelEncoder -from transformers import AutoConfig, BertConfig, BertModel - -from .imports import * - - -def save_model(model, model_save_directory): - if not os.path.exists(model_save_directory): - os.makedirs(model_save_directory) - - # Get the state dict - if isinstance(model, nn.DataParallel): - model_state_dict = ( - 
model.module.state_dict() - ) # Use model.module to access the underlying model - else: - model_state_dict = model.state_dict() - - # Remove the "module." prefix from the keys if present - model_state_dict = { - k.replace("module.", ""): v for k, v in model_state_dict.items() - } - - model_save_path = os.path.join(model_save_directory, "pytorch_model.bin") - torch.save(model_state_dict, model_save_path) - - # Save the model configuration - if isinstance(model, nn.DataParallel): - model.module.config.to_json_file( - os.path.join(model_save_directory, "config.json") - ) - else: - model.config.to_json_file(os.path.join(model_save_directory, "config.json")) - - print(f"Model and configuration saved to {model_save_directory}") - - -def calculate_task_specific_metrics(task_true_labels, task_pred_labels): - task_metrics = {} - for task_name in task_true_labels.keys(): - true_labels = task_true_labels[task_name] - pred_labels = task_pred_labels[task_name] - f1 = f1_score(true_labels, pred_labels, average="macro") - accuracy = accuracy_score(true_labels, pred_labels) - task_metrics[task_name] = {"f1": f1, "accuracy": accuracy} - return task_metrics - - -def calculate_combined_f1(combined_labels, combined_preds): - # Initialize the LabelEncoder - le = LabelEncoder() - - # Fit and transform combined labels and predictions to numerical values - le.fit(combined_labels + combined_preds) - encoded_true_labels = le.transform(combined_labels) - encoded_pred_labels = le.transform(combined_preds) - - # Print out the mapping for sanity check - print("\nLabel Encoder Mapping:") - for index, class_label in enumerate(le.classes_): - print(f"'{class_label}': {index}") - - # Calculate accuracy - accuracy = accuracy_score(encoded_true_labels, encoded_pred_labels) - - # Calculate F1 Macro score - f1 = f1_score(encoded_true_labels, encoded_pred_labels, average="macro") - - return f1, accuracy - - -# def save_model_without_heads(original_model_save_directory): -# # Create a new directory for 
the model without heads -# new_model_save_directory = original_model_save_directory + "_No_Heads" -# if not os.path.exists(new_model_save_directory): -# os.makedirs(new_model_save_directory) - -# # Load the model state dictionary -# model_state_dict = torch.load( -# os.path.join(original_model_save_directory, "pytorch_model.bin") -# ) - -# # Initialize a new BERT model without the classification heads -# config = BertConfig.from_pretrained( -# os.path.join(original_model_save_directory, "config.json") -# ) -# model_without_heads = BertModel(config) - -# # Filter the state dict to exclude classification heads -# model_without_heads_state_dict = { -# k: v -# for k, v in model_state_dict.items() -# if not k.startswith("classification_heads") -# } - -# # Load the filtered state dict into the model -# model_without_heads.load_state_dict(model_without_heads_state_dict, strict=False) - -# # Save the model without heads -# model_save_path = os.path.join(new_model_save_directory, "pytorch_model.bin") -# torch.save(model_without_heads.state_dict(), model_save_path) - -# # Copy the configuration file -# shutil.copy( -# os.path.join(original_model_save_directory, "config.json"), -# new_model_save_directory, -# ) - -# print(f"Model without classification heads saved to {new_model_save_directory}") - - -def get_layer_freeze_range(pretrained_path): - """ - Dynamically determines the number of layers to freeze based on the model depth from its configuration. - Args: - pretrained_path (str): Path to the pretrained model directory or model identifier. - Returns: - dict: A dictionary with 'min' and 'max' keys indicating the range of layers to freeze. 
- """ - if pretrained_path: - config = AutoConfig.from_pretrained(pretrained_path) - total_layers = config.num_hidden_layers - return {"min": 0, "max": total_layers - 1} - else: - return {"min": 0, "max": 0} diff --git a/geneformer/mtl_classifier.py b/geneformer/mtl_classifier.py deleted file mode 100644 index 68ee837a416e27d9e20156100e30718dec6778d0..0000000000000000000000000000000000000000 --- a/geneformer/mtl_classifier.py +++ /dev/null @@ -1,363 +0,0 @@ -""" -Geneformer multi-task cell classifier. - -**Input data:** - -| Single-cell transcriptomes as Geneformer rank value encodings with cell state labels for each task in Geneformer .dataset format (generated from single-cell RNAseq data by tokenizer.py). Must contain "unique_cell_id" column for logging. - -**Usage:** - -.. code-block :: python - - >>> from geneformer import MTLClassifier - >>> mc = MTLClassifier(task_columns = ["task1", "task2"], - ... study_name = "mtl", - ... pretrained_path = "/path/pretrained/model", - ... train_path = "/path/train/set", - ... val_path = "/path/eval/set", - ... test_path = "/path/test/set", - ... model_save_path = "/results/directory/save_path", - ... trials_result_path = "/results/directory/results.txt", - ... results_dir = "/results/directory", - ... tensorboard_log_dir = "/results/tblogdir", - ... 
hyperparameters = hyperparameters) - >>> mc.run_optuna_study() - >>> mc.load_and_evaluate_test_model() - >>> mc.save_model_without_heads() -""" - -import logging -import os - -from .mtl import eval_utils, train_utils, utils - -logger = logging.getLogger(__name__) - - -class MTLClassifier: - valid_option_dict = { - "task_columns": {list}, - "train_path": {None, str}, - "val_path": {None, str}, - "test_path": {None, str}, - "pretrained_path": {None, str}, - "model_save_path": {None, str}, - "results_dir": {None, str}, - "batch_size": {None, int}, - "n_trials": {None, int}, - "study_name": {None, str}, - "max_layers_to_freeze": {None, dict}, - "epochs": {None, int}, - "tensorboard_log_dir": {None, str}, - "use_data_parallel": {None, bool}, - "use_attention_pooling": {None, bool}, - "use_task_weights": {None, bool}, - "hyperparameters": {None, dict}, - "manual_hyperparameters": {None, dict}, - "use_manual_hyperparameters": {None, bool}, - "use_wandb": {None, bool}, - "wandb_project": {None, str}, - "gradient_clipping": {None, bool}, - "max_grad_norm": {None, int, float}, - "seed": {None, int}, - "trials_result_path": {None, str}, - } - - def __init__( - self, - task_columns=None, - train_path=None, - val_path=None, - test_path=None, - pretrained_path=None, - model_save_path=None, - results_dir=None, - trials_result_path=None, - batch_size=4, - n_trials=15, - study_name="mtl", - max_layers_to_freeze=None, - epochs=1, - tensorboard_log_dir="/results/tblogdir", - use_data_parallel=False, - use_attention_pooling=True, - use_task_weights=True, - hyperparameters=None, # Default is None - manual_hyperparameters=None, # Default is None - use_manual_hyperparameters=False, # Default is False - use_wandb=False, - wandb_project=None, - gradient_clipping=False, - max_grad_norm=None, - seed=42, # Default seed value - ): - """ - Initialize Geneformer multi-task classifier. 
- - **Parameters:** - - task_columns : list - | List of tasks for cell state classification - | Input data columns are labeled with corresponding task names - study_name : None, str - | Study name for labeling output files - pretrained_path : None, str - | Path to pretrained model - train_path : None, str - | Path to training dataset with task columns and "unique_cell_id" column - val_path : None, str - | Path to validation dataset with task columns and "unique_cell_id" column - test_path : None, str - | Path to test dataset with task columns and "unique_cell_id" column - model_save_path : None, str - | Path to directory to save output model (either full model or model without heads) - trials_result_path : None, str - | Path to directory to save hyperparameter tuning trial results - results_dir : None, str - | Path to directory to save results - tensorboard_log_dir : None, str - | Path to directory for Tensorboard logging results - use_data_parallel : None, bool - | Whether to use data parallelization - use_attention_pooling : None, bool - | Whether to use attention pooling - use_task_weights : None, bool - | Whether to use task weights - batch_size : None, int - | Batch size to use - n_trials : None, int - | Number of trials for hyperparameter tuning - epochs : None, int - | Number of epochs for training - max_layers_to_freeze : None, dict - | Dictionary with keys "min" and "max" indicating the min and max layers to freeze from fine-tuning (int) - | 0: no layers will be frozen; 2: first two layers will be frozen; etc. 
- hyperparameters : None, dict - | Dictionary of categorical max and min for each hyperparameter for tuning - | For example: - | {"learning_rate": {"type":"float", "low":"1e-5", "high":"1e-3", "log":True}, "task_weights": {...}, ...} - manual_hyperparameters : None, dict - | Dictionary of manually set value for each hyperparameter - | For example: - | {"learning_rate": 0.001, "task_weights": [1, 1], ...} - use_manual_hyperparameters : None, bool - | Whether to use manually set hyperparameters - use_wandb : None, bool - | Whether to use Weights & Biases for logging - wandb_project : None, str - | Weights & Biases project name - gradient_clipping : None, bool - | Whether to use gradient clipping - max_grad_norm : None, int, float - | Maximum norm for gradient clipping - seed : None, int - | Random seed - """ - - self.task_columns = task_columns - self.train_path = train_path - self.val_path = val_path - self.test_path = test_path - self.pretrained_path = pretrained_path - self.model_save_path = model_save_path - self.results_dir = results_dir - self.trials_result_path = trials_result_path - self.batch_size = batch_size - self.n_trials = n_trials - self.study_name = study_name - - if max_layers_to_freeze is None: - # Dynamically determine the range of layers to freeze - layer_freeze_range = utils.get_layer_freeze_range(pretrained_path) - self.max_layers_to_freeze = {"min": 1, "max": layer_freeze_range["max"]} - else: - self.max_layers_to_freeze = max_layers_to_freeze - - self.epochs = epochs - self.tensorboard_log_dir = tensorboard_log_dir - self.use_data_parallel = use_data_parallel - self.use_attention_pooling = use_attention_pooling - self.use_task_weights = use_task_weights - self.hyperparameters = ( - hyperparameters - if hyperparameters is not None - else { - "learning_rate": { - "type": "float", - "low": 1e-5, - "high": 1e-3, - "log": True, - }, - "warmup_ratio": {"type": "float", "low": 0.005, "high": 0.01}, - "weight_decay": {"type": "float", "low": 0.01, 
"high": 0.1}, - "dropout_rate": {"type": "float", "low": 0.0, "high": 0.7}, - "lr_scheduler_type": {"type": "categorical", "choices": ["cosine"]}, - "task_weights": {"type": "float", "low": 0.1, "high": 2.0}, - } - ) - self.manual_hyperparameters = ( - manual_hyperparameters - if manual_hyperparameters is not None - else { - "learning_rate": 0.001, - "warmup_ratio": 0.01, - "weight_decay": 0.1, - "dropout_rate": 0.1, - "lr_scheduler_type": "cosine", - "use_attention_pooling": False, - "task_weights": [1, 1], - "max_layers_to_freeze": 2, - } - ) - self.use_manual_hyperparameters = use_manual_hyperparameters - self.use_wandb = use_wandb - self.wandb_project = wandb_project - self.gradient_clipping = gradient_clipping - self.max_grad_norm = max_grad_norm - self.seed = seed - - if self.use_manual_hyperparameters: - logger.warning( - "Hyperparameter tuning is highly recommended for optimal results." - ) - - self.validate_options() - - # set up output directories - if self.results_dir is not None: - self.trials_results_path = f"{self.results_dir}/results.txt".replace( - "//", "/" - ) - - for output_dir in [self.model_save_path, self.results_dir]: - if not os.path.exists(output_dir): - os.makedirs(output_dir) - - self.config = { - key: value - for key, value in self.__dict__.items() - if key in self.valid_option_dict - } - - def validate_options(self): - # confirm arguments are within valid options and compatible with each other - for attr_name, valid_options in self.valid_option_dict.items(): - attr_value = self.__dict__[attr_name] - if not isinstance(attr_value, (list, dict)): - if attr_value in valid_options: - continue - valid_type = False - for option in valid_options: - if (option in [int, float, list, dict, bool, str]) and isinstance( - attr_value, option - ): - valid_type = True - break - if valid_type: - continue - logger.error( - f"Invalid option for {attr_name}. 
" - f"Valid options for {attr_name}: {valid_options}" - ) - raise ValueError( - f"Invalid option for {attr_name}. Valid options for {attr_name}: {valid_options}" - ) - - def run_manual_tuning(self): - """ - Manual hyperparameter tuning and multi-task fine-tuning of pretrained model. - """ - required_variable_names = [ - "train_path", - "val_path", - "pretrained_path", - "model_save_path", - "results_dir", - ] - required_variables = [ - self.train_path, - self.val_path, - self.pretrained_path, - self.model_save_path, - self.results_dir, - ] - req_var_dict = dict(zip(required_variable_names, required_variables)) - self.validate_additional_options(req_var_dict) - - if not self.use_manual_hyperparameters: - raise ValueError( - "Manual hyperparameters are not enabled. Set use_manual_hyperparameters to True." - ) - - # Ensure manual_hyperparameters are set in the config - self.config["manual_hyperparameters"] = self.manual_hyperparameters - self.config["use_manual_hyperparameters"] = True - - train_utils.run_manual_tuning(self.config) - - def validate_additional_options(self, req_var_dict): - missing_variable = False - for variable_name, variable in req_var_dict.items(): - if variable is None: - logger.warning( - f"Please provide value to MTLClassifier for required variable {variable_name}" - ) - missing_variable = True - if missing_variable is True: - raise ValueError("Missing required variables for MTLClassifier") - - def run_optuna_study( - self, - ): - """ - Hyperparameter optimization and/or multi-task fine-tuning of pretrained model. 
- """ - - required_variable_names = [ - "train_path", - "val_path", - "pretrained_path", - "model_save_path", - "results_dir", - ] - required_variables = [ - self.train_path, - self.val_path, - self.pretrained_path, - self.model_save_path, - self.results_dir, - ] - req_var_dict = dict(zip(required_variable_names, required_variables)) - self.validate_additional_options(req_var_dict) - - train_utils.run_optuna_study(self.config) - - def load_and_evaluate_test_model( - self, - ): - """ - Loads previously fine-tuned multi-task model and evaluates on test data. - """ - - required_variable_names = ["test_path", "model_save_path", "results_dir"] - required_variables = [self.test_path, self.model_save_path, self.results_dir] - req_var_dict = dict(zip(required_variable_names, required_variables)) - self.validate_additional_options(req_var_dict) - - eval_utils.load_and_evaluate_test_model(self.config) - - # def save_model_without_heads( - # self, - # ): - # """ - # Save previously fine-tuned multi-task model without classification heads. 
- # """ - - # required_variable_names = ["model_save_path"] - # required_variables = [self.model_save_path] - # req_var_dict = dict(zip(required_variable_names, required_variables)) - # self.validate_additional_options(req_var_dict) - - # utils.save_model_without_heads( - # os.path.join(self.model_save_path, "GeneformerMultiTask") - # ) diff --git a/geneformer/perturber_utils.py b/geneformer/perturber_utils.py deleted file mode 100644 index e7091a2f9df2e7fcb944083a3029734bce7a9328..0000000000000000000000000000000000000000 --- a/geneformer/perturber_utils.py +++ /dev/null @@ -1,919 +0,0 @@ -import itertools as it -import logging -import pickle -from collections import defaultdict -from pathlib import Path -from typing import List - -import numpy as np -import pandas as pd -import torch -from datasets import Dataset, load_from_disk -from peft import LoraConfig, get_peft_model -from transformers import ( - BertForMaskedLM, - BertForSequenceClassification, - BertForTokenClassification, - BitsAndBytesConfig, -) - -GENE_MEDIAN_FILE = Path(__file__).parent / "gene_median_dictionary.pkl" -TOKEN_DICTIONARY_FILE = Path(__file__).parent / "token_dictionary.pkl" -ENSEMBL_DICTIONARY_FILE = Path(__file__).parent / "gene_name_id_dict.pkl" - - -logger = logging.getLogger(__name__) - - -# load data and filter by defined criteria -def load_and_filter(filter_data, nproc, input_data_file): - data = load_from_disk(input_data_file) - if filter_data is not None: - data = filter_by_dict(data, filter_data, nproc) - return data - - -def filter_by_dict(data, filter_data, nproc): - for key, value in filter_data.items(): - - def filter_data_by_criteria(example): - return example[key] in value - - data = data.filter(filter_data_by_criteria, num_proc=nproc) - if len(data) == 0: - logger.error("No cells remain after filtering. 
Check filtering criteria.") - raise - return data - - -def filter_data_by_tokens(filtered_input_data, tokens, nproc): - def if_has_tokens(example): - return len(set(example["input_ids"]).intersection(tokens)) == len(tokens) - - filtered_input_data = filtered_input_data.filter(if_has_tokens, num_proc=nproc) - return filtered_input_data - - -def logging_filtered_data_len(filtered_input_data, filtered_tokens_categ): - if len(filtered_input_data) == 0: - logger.error(f"No cells in dataset contain {filtered_tokens_categ}.") - raise - else: - logger.info(f"# cells with {filtered_tokens_categ}: {len(filtered_input_data)}") - - -def filter_data_by_tokens_and_log( - filtered_input_data, tokens, nproc, filtered_tokens_categ -): - # filter for cells with anchor gene - filtered_input_data = filter_data_by_tokens(filtered_input_data, tokens, nproc) - # logging length of filtered data - logging_filtered_data_len(filtered_input_data, filtered_tokens_categ) - - return filtered_input_data - - -def filter_data_by_start_state(filtered_input_data, cell_states_to_model, nproc): - # confirm that start state is valid to prevent futile filtering - state_key = cell_states_to_model["state_key"] - state_values = filtered_input_data[state_key] - start_state = cell_states_to_model["start_state"] - if start_state not in state_values: - logger.error( - f"Start state {start_state} is not present " - f"in the dataset's {state_key} attribute." - ) - raise - - # filter for start state cells - def filter_for_origin(example): - return example[state_key] in [start_state] - - filtered_input_data = filtered_input_data.filter(filter_for_origin, num_proc=nproc) - return filtered_input_data - - -def slice_by_inds_to_perturb(filtered_input_data, cell_inds_to_perturb): - if cell_inds_to_perturb["start"] >= len(filtered_input_data): - logger.error( - "cell_inds_to_perturb['start'] is larger than the filtered dataset." 
# load model to GPU
def load_model(model_type, num_classes, model_directory, mode, quantize=False):
    """Load a Geneformer BERT model.

    model_type: "Pretrained" | "GeneClassifier" | "CellClassifier" | "MTLCellClassifier"
        | "MTLCellClassifier-Quantized" (alias: MTLCellClassifier + 8-bit quantization)
    num_classes: number of labels (ignored for "Pretrained")
    model_directory: path or hub id passed to from_pretrained
    mode: "eval" enables hidden-state output and switches to eval mode
    quantize: load with bitsandbytes quantization (8-bit for MTLCellClassifier,
        otherwise 4-bit NF4 plus a LoRA PEFT config)
    """
    # "MTLCellClassifier-Quantized" is shorthand for MTLCellClassifier with quantize=True
    if model_type == "MTLCellClassifier-Quantized":
        model_type = "MTLCellClassifier"
        quantize = True

    # hidden states are only needed when extracting embeddings
    output_hidden_states = mode == "eval"

    # Quantization logic
    if quantize:
        if model_type == "MTLCellClassifier":
            quantize_config = BitsAndBytesConfig(load_in_8bit=True)
            peft_config = None
        else:
            quantize_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.bfloat16,
            )
            peft_config = LoraConfig(
                lora_alpha=128,
                lora_dropout=0.1,
                r=64,
                bias="none",
                task_type="TokenClassification",
            )
    else:
        quantize_config = None
        peft_config = None

    # Model class selection
    model_classes = {
        "Pretrained": BertForMaskedLM,
        "GeneClassifier": BertForTokenClassification,
        "CellClassifier": BertForSequenceClassification,
        "MTLCellClassifier": BertForMaskedLM,
    }
    model_class = model_classes.get(model_type)
    if not model_class:
        raise ValueError(f"Unknown model type: {model_type}")

    # Model loading
    model_args = {
        "pretrained_model_name_or_path": model_directory,
        "output_hidden_states": output_hidden_states,
        "output_attentions": False,
    }
    if model_type != "Pretrained":
        model_args["num_labels"] = num_classes
    if quantize_config:
        model_args["quantization_config"] = quantize_config

    model = model_class.from_pretrained(**model_args)

    if mode == "eval":
        model.eval()

    # Handle device placement and PEFT
    if not quantize:
        # Only move non-quantized models; bitsandbytes handles placement itself
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = model.to(device)
    elif peft_config:
        # Apply PEFT for quantized models (except MTLCellClassifier)
        model.enable_input_require_grads()
        model = get_peft_model(model, peft_config)

    return model


def quant_layers(model):
    """Return the number of transformer layers in the model (max layer index + 1)."""
    layer_nums = []
    for name, parameter in model.named_parameters():
        if "layer" in name:
            layer_nums += [int(name.split("layer.")[1].split(".")[0])]
    return int(max(layer_nums)) + 1


def get_model_emb_dims(model):
    """Return the model's embedding (hidden) dimension."""
    return model.config.hidden_size


def get_model_input_size(model):
    """Return the model's maximum input length (position embeddings)."""
    return model.config.max_position_embeddings


def flatten_list(megalist):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [item for sublist in megalist for item in sublist]


def measure_length(example):
    """Set example["length"] to the current number of input_ids tokens."""
    example["length"] = len(example["input_ids"])
    return example


def downsample_and_sort(data, max_ncells):
    """Optionally subsample to max_ncells, then sort longest-first.

    Sorting with the largest cell first surfaces any memory errors earlier.
    """
    num_cells = len(data)
    # if max number of cells is defined, then shuffle and subsample to this max number
    if max_ncells is not None:
        if num_cells > max_ncells:
            data = data.shuffle(seed=42)
            num_cells = max_ncells
    data_subset = data.select([i for i in range(num_cells)])
    data_sorted = data_subset.sort("length", reverse=True)
    return data_sorted


def get_possible_states(cell_states_to_model):
    """Return [start_state, goal_state, *alt_states] from the states dict."""
    possible_states = []
    for key in ["start_state", "goal_state"]:
        possible_states += [cell_states_to_model[key]]
    possible_states += cell_states_to_model.get("alt_states", [])
    return possible_states


def forward_pass_single_cell(model, example_cell, layer_to_quant):
    """Run one cell through the model and return hidden states at layer_to_quant.

    NOTE(review): input is moved to "cuda" unconditionally — assumes a GPU is
    available; confirm callers only invoke this on CUDA-enabled hosts.
    """
    example_cell.set_format(type="torch")
    input_data = example_cell["input_ids"]
    with torch.no_grad():
        outputs = model(input_ids=input_data.to("cuda"))
    emb = torch.squeeze(outputs.hidden_states[layer_to_quant])
    del outputs
    return emb


def perturb_emb_by_index(emb, indices):
    """Return emb with the rows at `indices` removed (boolean-mask select)."""
    mask = torch.ones(emb.numel(), dtype=torch.bool)
    mask[indices] = False
    return emb[mask]


def delete_indices(example):
    """Delete the tokens at example["perturb_index"] and refresh "length"."""
    indices = example["perturb_index"]
    if any(isinstance(el, list) for el in indices):
        indices = flatten_list(indices)
    # delete from the back so earlier positions stay valid
    for index in sorted(indices, reverse=True):
        del example["input_ids"][index]

    example["length"] = len(example["input_ids"])
    return example


# for genes_to_perturb = "all" where only genes within cell are overexpressed
def overexpress_indices(example):
    """Move tokens at perturb_index to the front of the rank value encoding."""
    indices = example["perturb_index"]
    if any(isinstance(el, list) for el in indices):
        indices = flatten_list(indices)
    insert_pos = 0
    for index in sorted(indices, reverse=False):
        example["input_ids"].insert(insert_pos, example["input_ids"].pop(index))
        insert_pos += 1
    example["length"] = len(example["input_ids"])
    return example


# if CLS token present, move to 1st rather than 0th position
def overexpress_indices_special(example):
    """Like overexpress_indices, but preserves a leading CLS token at position 0."""
    indices = example["perturb_index"]
    if any(isinstance(el, list) for el in indices):
        indices = flatten_list(indices)
    insert_pos = 1  # Insert starting after CLS token
    for index in sorted(indices, reverse=False):
        example["input_ids"].insert(insert_pos, example["input_ids"].pop(index))
        insert_pos += 1
    example["length"] = len(example["input_ids"])
    return example


# for genes_to_perturb = list of genes to overexpress that are not necessarily expressed in cell
def overexpress_tokens(example, max_len, special_token):
    """Prepend tokens_to_perturb (after CLS if special_token), then truncate to max_len."""
    # -100 indicates tokens to overexpress are not present in rank value encoding
    if example["perturb_index"] != [-100]:
        example = delete_indices(example)
    if special_token:
        [
            example["input_ids"].insert(1, token)
            for token in example["tokens_to_perturb"][::-1]
        ]
    else:
        [
            example["input_ids"].insert(0, token)
            for token in example["tokens_to_perturb"][::-1]
        ]

    # truncate to max input size, must also truncate original emb to be comparable
    if len(example["input_ids"]) > max_len:
        if special_token:
            # keep the trailing EOS token when truncating
            example["input_ids"] = example["input_ids"][0 : max_len - 1] + [
                example["input_ids"][-1]
            ]
        else:
            example["input_ids"] = example["input_ids"][0:max_len]
    example["length"] = len(example["input_ids"])
    return example


def calc_n_overflow(max_len, example_len, tokens_to_perturb, indices_to_perturb):
    """Number of tokens by which the perturbed encoding would exceed max_len."""
    n_to_add = len(tokens_to_perturb) - len(indices_to_perturb)
    n_overflow = example_len + n_to_add - max_len
    return n_overflow


def truncate_by_n_overflow(example):
    """Drop the trailing n_overflow tokens and refresh "length"."""
    new_max_len = example["length"] - example["n_overflow"]
    example["input_ids"] = example["input_ids"][0:new_max_len]
    example["length"] = len(example["input_ids"])
    return example


def truncate_by_n_overflow_special(example):
    """Like truncate_by_n_overflow, but preserves the trailing EOS token."""
    if example["n_overflow"] > 0:
        new_max_len = example["length"] - example["n_overflow"]
        example["input_ids"] = example["input_ids"][0 : new_max_len - 1] + [
            example["input_ids"][-1]
        ]
        example["length"] = len(example["input_ids"])
    return example


def remove_indices_from_emb(emb, indices_to_remove, gene_dim):
    """Return emb without the slices at indices_to_remove along gene_dim."""
    # indices_to_remove is list of indices to remove
    indices_to_keep = [
        i for i in range(emb.size()[gene_dim]) if i not in indices_to_remove
    ]
    num_dims = emb.dim()
    emb_slice = [
        slice(None) if dim != gene_dim else indices_to_keep for dim in range(num_dims)
    ]
    sliced_emb = emb[emb_slice]
    return sliced_emb


def remove_indices_from_emb_batch(emb_batch, list_of_indices_to_remove, gene_dim):
    """Per-example index removal over a batch, re-padded to a common length."""
    output_batch_list = [
        remove_indices_from_emb(emb_batch[i, :, :], idxes, gene_dim - 1)
        for i, idxes in enumerate(list_of_indices_to_remove)
    ]
    # add padding given genes are sometimes added that are or are not in original cell
    batch_max = max([emb.size()[gene_dim - 1] for emb in output_batch_list])
    output_batch_list_padded = [
        pad_xd_tensor(emb, 0.000, batch_max, gene_dim - 1) for emb in output_batch_list
    ]
    return torch.stack(output_batch_list_padded)


# removes perturbed indices
# need to handle the various cases where a set of genes is overexpressed
def remove_perturbed_indices_set(
    emb,
    perturb_type: str,
    indices_to_perturb: List[List],
    tokens_to_perturb: List[List],
    original_lengths: List[int],
    input_ids=None,
):
    """Remove the embeddings of perturbed positions from emb (overexpress-aware)."""
    if perturb_type == "overexpress":
        num_perturbed = len(tokens_to_perturb)
        if num_perturbed == 1:
            indices_to_perturb_orig = [
                idx if idx != [-100] else [None] for idx in indices_to_perturb
            ]
            # FIX: was `v is [None]`, an identity check against a fresh list literal
            # that is always False, so the early return never triggered.
            if all(v == [None] for v in indices_to_perturb_orig):
                return emb
        else:
            indices_to_perturb_orig = []

            for idx_list in indices_to_perturb:
                # NOTE(review): elements of idx_list are compared to the sentinel
                # [-100] per entry here — confirm nesting matches callers.
                indices_to_perturb_orig.append(
                    [idx if idx != [-100] else [None] for idx in idx_list]
                )

    else:
        indices_to_perturb_orig = indices_to_perturb

    emb = remove_indices_from_emb_batch(emb, indices_to_perturb_orig, gene_dim=1)

    return emb


def make_perturbation_batch(
    example_cell, perturb_type, tokens_to_perturb, anchor_token, combo_lvl, num_proc
) -> "tuple[Dataset, List[int]]":
    """Build a Dataset of perturbed copies of one cell and the perturbed indices.

    Returns (perturbation_dataset, indices_to_perturb).
    """
    if combo_lvl == 0 and tokens_to_perturb == "all":
        if perturb_type in ["overexpress", "activate"]:
            range_start = 1
        elif perturb_type in ["delete", "inhibit"]:
            range_start = 0
        indices_to_perturb = [
            [i] for i in range(range_start, example_cell["length"][0])
        ]
    # elif combo_lvl > 0 and anchor_token is None:
    ## to implement
    elif combo_lvl > 0 and (anchor_token is not None):
        example_input_ids = example_cell["input_ids"][0]
        anchor_index = example_input_ids.index(anchor_token[0])
        indices_to_perturb = [
            sorted([anchor_index, i]) if i != anchor_index else None
            for i in range(example_cell["length"][0])
        ]
        indices_to_perturb = [item for item in indices_to_perturb if item is not None]
    else:
        example_input_ids = example_cell["input_ids"][0]
        indices_to_perturb = [
            [example_input_ids.index(token)] if token in example_input_ids else None
            for token in tokens_to_perturb
        ]
        indices_to_perturb = [item for item in indices_to_perturb if item is not None]

    # create all permutations of combo_lvl of modifiers from tokens_to_perturb
    if combo_lvl > 0 and (anchor_token is None):
        if tokens_to_perturb != "all":
            if len(tokens_to_perturb) == combo_lvl + 1:
                indices_to_perturb = [
                    list(x) for x in it.combinations(indices_to_perturb, combo_lvl + 1)
                ]
        else:
            all_indices = [[i] for i in range(example_cell["length"][0])]
            all_indices = [
                index for index in all_indices if index not in indices_to_perturb
            ]
            indices_to_perturb = [
                [[j for i in indices_to_perturb for j in i], x] for x in all_indices
            ]

    length = len(indices_to_perturb)
    perturbation_dataset = Dataset.from_dict(
        {
            "input_ids": example_cell["input_ids"] * length,
            "perturb_index": indices_to_perturb,
        }
    )

    # multiprocessing overhead is not worth it for small batches
    if length < 400:
        num_proc_i = 1
    else:
        num_proc_i = num_proc

    if perturb_type == "delete":
        perturbation_dataset = perturbation_dataset.map(
            delete_indices, num_proc=num_proc_i
        )
    elif perturb_type == "overexpress":
        perturbation_dataset = perturbation_dataset.map(
            overexpress_indices, num_proc=num_proc_i
        )

    perturbation_dataset = perturbation_dataset.map(measure_length, num_proc=num_proc_i)

    return perturbation_dataset, indices_to_perturb


def make_perturbation_batch_special(
    example_cell, perturb_type, tokens_to_perturb, anchor_token, combo_lvl, num_proc
) -> "tuple[Dataset, List[int]]":
    """Variant of make_perturbation_batch that skips CLS/EOS special tokens."""
    if combo_lvl == 0 and tokens_to_perturb == "all":
        if perturb_type in ["overexpress", "activate"]:
            range_start = 1
        elif perturb_type in ["delete", "inhibit"]:
            range_start = 0
        range_start += 1  # Starting after the CLS token
        indices_to_perturb = [
            [i]
            for i in range(
                range_start, example_cell["length"][0] - 1
            )  # And excluding the EOS token
        ]

    # elif combo_lvl > 0 and anchor_token is None:
    ## to implement
    elif combo_lvl > 0 and (anchor_token is not None):
        example_input_ids = example_cell["input_ids"][0]
        anchor_index = example_input_ids.index(anchor_token[0])
        indices_to_perturb = [
            sorted([anchor_index, i]) if i != anchor_index else None
            for i in range(
                1, example_cell["length"][0] - 1
            )  # Exclude CLS and EOS tokens
        ]
        indices_to_perturb = [item for item in indices_to_perturb if item is not None]
    else:
        example_input_ids = example_cell["input_ids"][0]
        indices_to_perturb = [
            [example_input_ids.index(token)] if token in example_input_ids else None
            for token in tokens_to_perturb
        ]
        indices_to_perturb = [item for item in indices_to_perturb if item is not None]

    # create all permutations of combo_lvl of modifiers from tokens_to_perturb
    if combo_lvl > 0 and (anchor_token is None):
        if tokens_to_perturb != "all":
            if len(tokens_to_perturb) == combo_lvl + 1:
                indices_to_perturb = [
                    list(x) for x in it.combinations(indices_to_perturb, combo_lvl + 1)
                ]
        else:
            all_indices = [
                [i] for i in range(1, example_cell["length"][0] - 1)
            ]  # Exclude CLS and EOS tokens
            all_indices = [
                index for index in all_indices if index not in indices_to_perturb
            ]
            indices_to_perturb = [
                [[j for i in indices_to_perturb for j in i], x] for x in all_indices
            ]

    length = len(indices_to_perturb)
    perturbation_dataset = Dataset.from_dict(
        {
            "input_ids": example_cell["input_ids"] * length,
            "perturb_index": indices_to_perturb,
        }
    )

    # multiprocessing overhead is not worth it for small batches
    if length < 400:
        num_proc_i = 1
    else:
        num_proc_i = num_proc

    if perturb_type == "delete":
        perturbation_dataset = perturbation_dataset.map(
            delete_indices, num_proc=num_proc_i
        )
    elif perturb_type == "overexpress":
        perturbation_dataset = perturbation_dataset.map(
            overexpress_indices_special, num_proc=num_proc_i
        )

    perturbation_dataset = perturbation_dataset.map(measure_length, num_proc=num_proc_i)

    return perturbation_dataset, indices_to_perturb
to each other -# in original or perturbed context -def make_comparison_batch(original_emb_batch, indices_to_perturb, perturb_group): - all_embs_list = [] - - # if making comparison batch for multiple perturbations in single cell - if perturb_group is False: - # squeeze if single cell - if original_emb_batch.ndim == 3 and original_emb_batch.size()[0] == 1: - original_emb_batch = torch.squeeze(original_emb_batch) - original_emb_list = [original_emb_batch] * len(indices_to_perturb) - # if making comparison batch for single perturbation in multiple cells - elif perturb_group is True: - original_emb_list = original_emb_batch - - for original_emb, indices in zip(original_emb_list, indices_to_perturb): - if indices == [-100]: - all_embs_list += [original_emb[:]] - continue - - emb_list = [] - start = 0 - if any(isinstance(el, list) for el in indices): - indices = flatten_list(indices) - - # removes indices that were perturbed from the original embedding - for i in sorted(indices): - emb_list += [original_emb[start:i]] - start = i + 1 - - emb_list += [original_emb[start:]] - all_embs_list += [torch.cat(emb_list)] - - len_set = set([emb.size()[0] for emb in all_embs_list]) - if len(len_set) > 1: - max_len = max(len_set) - all_embs_list = [pad_2d_tensor(emb, None, max_len, 0) for emb in all_embs_list] - return torch.stack(all_embs_list) - - -def pad_list(input_ids, pad_token_id, max_len): - input_ids = np.pad( - input_ids, - (0, max_len - len(input_ids)), - mode="constant", - constant_values=pad_token_id, - ) - return input_ids - - -def pad_xd_tensor(tensor, pad_token_id, max_len, dim): - padding_length = max_len - tensor.size()[dim] - # Construct a padding configuration where all padding values are 0, except for the padding dimension - # 2 * number of dimensions (padding before and after for every dimension) - pad_config = [0] * 2 * tensor.dim() - # Set the padding after the desired dimension to the calculated padding length - pad_config[-2 * dim - 1] = padding_length - 
return torch.nn.functional.pad( - tensor, pad=pad_config, mode="constant", value=pad_token_id - ) - - -def pad_tensor(tensor, pad_token_id, max_len): - tensor = torch.nn.functional.pad( - tensor, pad=(0, max_len - tensor.numel()), mode="constant", value=pad_token_id - ) - - return tensor - - -def pad_2d_tensor(tensor, pad_token_id, max_len, dim): - if dim == 0: - pad = (0, 0, 0, max_len - tensor.size()[dim]) - elif dim == 1: - pad = (0, max_len - tensor.size()[dim], 0, 0) - tensor = torch.nn.functional.pad( - tensor, pad=pad, mode="constant", value=pad_token_id - ) - return tensor - - -def pad_3d_tensor(tensor, pad_token_id, max_len, dim): - if dim == 0: - raise Exception("dim 0 usually does not need to be padded.") - if dim == 1: - pad = (0, 0, 0, max_len - tensor.size()[dim]) - elif dim == 2: - pad = (0, max_len - tensor.size()[dim], 0, 0) - tensor = torch.nn.functional.pad( - tensor, pad=pad, mode="constant", value=pad_token_id - ) - return tensor - - -def pad_or_truncate_encoding(encoding, pad_token_id, max_len): - if isinstance(encoding, torch.Tensor): - encoding_len = encoding.size()[0] - elif isinstance(encoding, list): - encoding_len = len(encoding) - if encoding_len > max_len: - encoding = encoding[0:max_len] - elif encoding_len < max_len: - if isinstance(encoding, torch.Tensor): - encoding = pad_tensor(encoding, pad_token_id, max_len) - elif isinstance(encoding, list): - encoding = pad_list(encoding, pad_token_id, max_len) - return encoding - - -# pad list of tensors and convert to tensor -def pad_tensor_list( - tensor_list, - dynamic_or_constant, - pad_token_id, - model_input_size, - dim=None, - padding_func=None, -): - # determine maximum tensor length - if dynamic_or_constant == "dynamic": - max_len = max([tensor.squeeze().numel() for tensor in tensor_list]) - elif isinstance(dynamic_or_constant, int): - max_len = dynamic_or_constant - else: - max_len = model_input_size - logger.warning( - "If padding style is constant, must provide integer value. 
" - f"Setting padding to max input size {model_input_size}." - ) - - # pad all tensors to maximum length - if dim is None: - tensor_list = [ - pad_tensor(tensor, pad_token_id, max_len) for tensor in tensor_list - ] - else: - tensor_list = [ - padding_func(tensor, pad_token_id, max_len, dim) for tensor in tensor_list - ] - # return stacked tensors - if padding_func != pad_3d_tensor: - return torch.stack(tensor_list) - else: - return torch.cat(tensor_list, 0) - - -def gen_attention_mask(minibatch_encoding, max_len=None): - if max_len is None: - max_len = max(minibatch_encoding["length"]) - original_lens = minibatch_encoding["length"] - attention_mask = [ - [1] * original_len + [0] * (max_len - original_len) - if original_len <= max_len - else [1] * max_len - for original_len in original_lens - ] - return torch.tensor(attention_mask, device="cuda") - - -# get cell embeddings excluding padding -def mean_nonpadding_embs(embs, original_lens, dim=1): - # create a mask tensor based on padding lengths - mask = torch.arange(embs.size(dim), device=embs.device) < original_lens.unsqueeze(1) - if embs.dim() == 3: - # fill the masked positions in embs with zeros - masked_embs = embs.masked_fill(~mask.unsqueeze(2), 0.0) - - # compute the mean across the non-padding dimensions - mean_embs = masked_embs.sum(dim) / original_lens.view(-1, 1).float() - - elif embs.dim() == 2: - masked_embs = embs.masked_fill(~mask, 0.0) - mean_embs = masked_embs.sum(dim) / original_lens.float() - return mean_embs - - -# get cell embeddings when there is no padding -def compute_nonpadded_cell_embedding(embs, cell_emb_style): - if cell_emb_style == "mean_pool": - return torch.mean(embs, dim=embs.ndim - 2) - - -# quantify shifts for a set of genes -def quant_cos_sims( - perturbation_emb, - original_emb, - cell_states_to_model, - state_embs_dict, - emb_mode="gene", -): - if emb_mode == "gene": - cos = torch.nn.CosineSimilarity(dim=2) - elif emb_mode == "cell": - cos = torch.nn.CosineSimilarity(dim=1) - - # 
if emb_mode == "gene", can only calculate gene cos sims - # against original cell - if cell_states_to_model is None or emb_mode == "gene": - cos_sims = cos(perturbation_emb, original_emb).to("cuda") - - elif cell_states_to_model is not None and emb_mode == "cell": - possible_states = get_possible_states(cell_states_to_model) - cos_sims = dict(zip(possible_states, [[] for _ in range(len(possible_states))])) - for state in possible_states: - cos_sims[state] = cos_sim_shift( - original_emb, - perturbation_emb, - state_embs_dict[state].to("cuda"), # required to move to cuda here - cos, - ) - - return cos_sims - - -# calculate cos sim shift of perturbation with respect to origin and alternative cell -def cos_sim_shift(original_emb, perturbed_emb, end_emb, cos): - origin_v_end = cos(original_emb, end_emb) - perturb_v_end = cos(perturbed_emb, end_emb) - - return perturb_v_end - origin_v_end - - -def concatenate_cos_sims(cos_sims): - if isinstance(cos_sims, list): - return torch.cat(cos_sims) - else: - for state in cos_sims.keys(): - cos_sims[state] = torch.cat(cos_sims[state]) - return cos_sims - - -def write_perturbation_dictionary(cos_sims_dict: defaultdict, output_path_prefix: str): - with open(f"{output_path_prefix}_raw.pickle", "wb") as fp: - pickle.dump(cos_sims_dict, fp) - - -def tensor_list_to_pd(tensor_list): - tensor = torch.cat(tensor_list).cpu().numpy() - df = pd.DataFrame(tensor) - return df - - -def validate_cell_states_to_model(cell_states_to_model): - if cell_states_to_model is not None: - if len(cell_states_to_model.items()) == 1: - logger.warning( - "The single value dictionary for cell_states_to_model will be " - "replaced with a dictionary with named keys for start, goal, and alternate states. " - "Please specify state_key, start_state, goal_state, and alt_states " - "in the cell_states_to_model dictionary for future use. 
" - "For example, cell_states_to_model={" - "'state_key': 'disease', " - "'start_state': 'dcm', " - "'goal_state': 'nf', " - "'alt_states': ['hcm', 'other1', 'other2']}" - ) - for key, value in cell_states_to_model.items(): - if (len(value) == 3) and isinstance(value, tuple): - if ( - isinstance(value[0], list) - and isinstance(value[1], list) - and isinstance(value[2], list) - ): - if len(value[0]) == 1 and len(value[1]) == 1: - all_values = value[0] + value[1] + value[2] - if len(all_values) == len(set(all_values)): - continue - # reformat to the new named key format - state_values = flatten_list(list(cell_states_to_model.values())) - - cell_states_to_model = { - "state_key": list(cell_states_to_model.keys())[0], - "start_state": state_values[0][0], - "goal_state": state_values[1][0], - "alt_states": state_values[2:][0], - } - elif set(cell_states_to_model.keys()).issuperset( - {"state_key", "start_state", "goal_state"} - ): - if ( - (cell_states_to_model["state_key"] is None) - or (cell_states_to_model["start_state"] is None) - or (cell_states_to_model["goal_state"] is None) - ): - logger.error( - "Please specify 'state_key', 'start_state', and 'goal_state' in cell_states_to_model." - ) - raise - - if ( - cell_states_to_model["start_state"] - == cell_states_to_model["goal_state"] - ): - logger.error("All states must be unique.") - raise - - if "alt_states" in set(cell_states_to_model.keys()): - if cell_states_to_model["alt_states"] is not None: - if not isinstance(cell_states_to_model["alt_states"], list): - logger.error( - "cell_states_to_model['alt_states'] must be a list (even if it is one element)." 
- ) - raise - if len(cell_states_to_model["alt_states"]) != len( - set(cell_states_to_model["alt_states"]) - ): - logger.error("All states must be unique.") - raise - else: - cell_states_to_model["alt_states"] = [] - - else: - logger.error( - "cell_states_to_model must only have the following four keys: " - "'state_key', 'start_state', 'goal_state', 'alt_states'." - "For example, cell_states_to_model={" - "'state_key': 'disease', " - "'start_state': 'dcm', " - "'goal_state': 'nf', " - "'alt_states': ['hcm', 'other1', 'other2']}" - ) - raise - - -class GeneIdHandler: - def __init__(self, raise_errors=False): - def invert_dict(dict_obj): - return {v: k for k, v in dict_obj.items()} - - self.raise_errors = raise_errors - - with open(TOKEN_DICTIONARY_FILE, "rb") as f: - self.gene_token_dict = pickle.load(f) - self.token_gene_dict = invert_dict(self.gene_token_dict) - - with open(ENSEMBL_DICTIONARY_FILE, "rb") as f: - self.id_gene_dict = pickle.load(f) - self.gene_id_dict = invert_dict(self.id_gene_dict) - - def ens_to_token(self, ens_id): - if not self.raise_errors: - return self.gene_token_dict.get(ens_id, ens_id) - else: - return self.gene_token_dict[ens_id] - - def token_to_ens(self, token): - if not self.raise_errors: - return self.token_gene_dict.get(token, token) - else: - return self.token_gene_dict[token] - - def ens_to_symbol(self, ens_id): - if not self.raise_errors: - return self.gene_id_dict.get(ens_id, ens_id) - else: - return self.gene_id_dict[ens_id] - - def symbol_to_ens(self, symbol): - if not self.raise_errors: - return self.id_gene_dict.get(symbol, symbol) - else: - return self.id_gene_dict[symbol] - - def token_to_symbol(self, token): - return self.ens_to_symbol(self.token_to_ens(token)) - - def symbol_to_token(self, symbol): - return self.ens_to_token(self.symbol_to_ens(symbol)) diff --git a/geneformer/pretrainer.py b/geneformer/pretrainer.py index b1af8b8b8d204b8bc6a3003037918465f4a54a92..0882fb941563bd4e3d8fe24b54102434d479f097 100644 --- 
a/geneformer/pretrainer.py +++ b/geneformer/pretrainer.py @@ -8,12 +8,13 @@ import math import pickle import warnings from enum import Enum -from typing import Dict, List, Optional, Union +from typing import Dict, Iterator, List, Optional, Union import numpy as np import torch from datasets import Dataset from packaging import version +from torch.utils.data.distributed import DistributedSampler from torch.utils.data.sampler import RandomSampler from transformers import ( BatchEncoding, @@ -23,11 +24,16 @@ from transformers import ( ) from transformers.file_utils import is_datasets_available, is_sagemaker_dp_enabled from transformers.trainer_pt_utils import ( + DistributedLengthGroupedSampler, + DistributedSamplerWithLoop, LengthGroupedSampler, ) +from transformers.training_args import ParallelMode from transformers.utils import is_tf_available, is_torch_available, logging, to_py_obj from transformers.utils.generic import _is_tensorflow, _is_torch +from .tokenizer import TOKEN_DICTIONARY_FILE + logger = logging.get_logger(__name__) EncodedInput = List[int] VERY_LARGE_INTEGER = int( @@ -46,6 +52,9 @@ _is_torch_generator_available = False if version.parse(torch.__version__) >= version.parse("1.6"): _is_torch_generator_available = True +with open(TOKEN_DICTIONARY_FILE, "rb") as f: + token_dictionary = pickle.load(f) + class ExplicitEnum(Enum): """ @@ -97,13 +106,22 @@ class TensorType(ExplicitEnum): class GeneformerPreCollator(SpecialTokensMixin): def __init__(self, *args, **kwargs) -> None: - super().__init__(mask_token="", pad_token="") - + + super().__init__(mask_token = "", pad_token = "") + self.token_dictionary = kwargs.get("token_dictionary") + # self.mask_token = "" + # self.mask_token_id = self.token_dictionary.get("") + # self.pad_token = "" + # self.pad_token_id = self.token_dictionary.get("") self.padding_side = "right" + # self.all_special_ids = [ + # self.token_dictionary.get(""), + # self.token_dictionary.get(""), + # ] self.model_input_names = 
["input_ids"] - - def convert_ids_to_tokens(self, value): + + def convert_ids_to_tokens(self,value): return self.token_dictionary.get(value) def _get_padding_truncation_strategies( @@ -363,7 +381,7 @@ class GeneformerPreCollator(SpecialTokensMixin): return_tensors = "tf" if return_tensors is None else return_tensors elif is_torch_available() and _is_torch(first_element): return_tensors = "pt" if return_tensors is None else return_tensors - elif isinstance(first_element, np.ndarray): + if isinstance(first_element, np.ndarray): return_tensors = "np" if return_tensors is None else return_tensors else: raise ValueError( @@ -373,6 +391,7 @@ class GeneformerPreCollator(SpecialTokensMixin): for key, value in encoded_inputs.items(): encoded_inputs[key] = to_py_obj(value) + # Convert padding_strategy in PaddingStrategy padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies( @@ -577,17 +596,15 @@ class GeneformerPreCollator(SpecialTokensMixin): class GeneformerPretrainer(Trainer): def __init__(self, *args, **kwargs): - data_collator = kwargs.get("data_collator", None) + data_collator = kwargs.get("data_collator",None) token_dictionary = kwargs.pop("token_dictionary") - mlm = kwargs.pop("mlm", True) - mlm_probability = kwargs.pop("mlm_probability", 0.15) if data_collator is None: precollator = GeneformerPreCollator(token_dictionary=token_dictionary) # # Data Collator Functions data_collator = DataCollatorForLanguageModeling( - tokenizer=precollator, mlm=mlm, mlm_probability=mlm_probability + tokenizer=precollator, mlm=True, mlm_probability=0.15 ) kwargs["data_collator"] = data_collator @@ -603,7 +620,7 @@ class GeneformerPretrainer(Trainer): ) super().__init__(*args, **kwargs) - # updated to not use distributed sampler since Trainer now distributes with accelerate + # modify LengthGroupedSampler to avoid dataset[length_column_name] hanging def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if not isinstance(self.train_dataset, 
class CustomDistributedLengthGroupedSampler(DistributedLengthGroupedSampler):
    r"""
    Distributed Sampler that samples indices in a way that groups together features of the dataset of roughly the same
    length while keeping a bit of randomness.
    """

    # Copied and adapted from PyTorch DistributedSampler.
    def __init__(
        self,
        dataset: "Dataset",
        batch_size: int,
        num_replicas: "Optional[int]" = None,
        rank: "Optional[int]" = None,
        seed: int = 0,
        drop_last: bool = False,
        lengths: "Optional[List[int]]" = None,
        model_input_name: "Optional[str]" = None,
    ):
        # Fall back to the active process group when replica count/rank are not
        # given. (FIX: previous version referenced an unimported name `dist`,
        # which would NameError; use torch.distributed explicitly.)
        if num_replicas is None:
            if not torch.distributed.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = torch.distributed.get_world_size()
        if rank is None:
            if not torch.distributed.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = torch.distributed.get_rank()
        self.dataset = dataset
        self.batch_size = batch_size
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.drop_last = drop_last
        # If the dataset length is evenly divisible by # of replicas, then there
        # is no need to drop any data, since the dataset will be split equally.
        if self.drop_last and len(self.dataset) % self.num_replicas != 0:
            # Split to nearest available length that is evenly divisible.
            # This is to ensure each rank receives the same amount of data when
            # using this Sampler.
            self.num_samples = math.ceil(
                (len(self.dataset) - self.num_replicas) / self.num_replicas
            )
        else:
            self.num_samples = math.ceil(len(self.dataset) / self.num_replicas)
        self.total_size = self.num_samples * self.num_replicas
        self.seed = seed
        self.model_input_name = (
            model_input_name if model_input_name is not None else "input_ids"
        )
        if lengths is None:
            print("Lengths is none - calculating lengths.")
            if (
                not (
                    isinstance(dataset[0], dict)
                    or isinstance(dataset[0], BatchEncoding)
                )
                or self.model_input_name not in dataset[0]
            ):
                raise ValueError(
                    "Can only automatically infer lengths for datasets whose items are dictionaries with an "
                    f"'{self.model_input_name}' key."
                )
            lengths = [len(feature[self.model_input_name]) for feature in dataset]
        self.lengths = lengths

    def __iter__(self) -> "Iterator":
        # Deterministically shuffle based on epoch and seed
        g = torch.Generator()
        g.manual_seed(self.seed + self.epoch)

        indices = get_length_grouped_indices(self.lengths, self.batch_size, generator=g)

        if not self.drop_last:
            # add extra samples to make it evenly divisible
            indices += indices[: (self.total_size - len(indices))]
        else:
            # remove tail of data to make it evenly divisible.
            indices = indices[: self.total_size]
        assert len(indices) == self.total_size

        # subsample this rank's strided share
        indices = indices[self.rank : self.total_size : self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)


def get_length_grouped_indices(
    lengths, batch_size, mega_batch_mult=None, generator=None
):
    """
    Return a list of indices so that each slice of :obj:`batch_size` consecutive indices correspond to elements of
    similar lengths. To do this, the indices are:

    - randomly permuted
    - grouped in mega-batches of size :obj:`mega_batch_mult * batch_size`
    - sorted by length in each mega-batch

    The result is the concatenation of all mega-batches, with the batch of :obj:`batch_size` containing the element of
    maximum length placed first, so that an OOM happens sooner rather than later.
    """
    # Default for mega_batch_mult: 1000 or the number to get 4 megabatches, whichever is smaller.
    if mega_batch_mult is None:
        # mega_batch_mult = min(len(lengths) // (batch_size * 4), 50)
        mega_batch_mult = min(len(lengths) // (batch_size * 4), 1000)
        # Just in case, for tiny datasets
        if mega_batch_mult == 0:
            mega_batch_mult = 1

    # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
    indices = torch.randperm(len(lengths), generator=generator)
    megabatch_size = mega_batch_mult * batch_size
    megabatches = [
        indices[i : i + megabatch_size].tolist()
        for i in range(0, len(lengths), megabatch_size)
    ]
    megabatches = [
        list(sorted(megabatch, key=lambda i: lengths[i], reverse=True))
        for megabatch in megabatches
    ]

    # The rest is to get the biggest batch first.
    # Since each megabatch is sorted by descending length, the longest element is the first
    megabatch_maximums = [lengths[megabatch[0]] for megabatch in megabatches]
    max_idx = torch.argmax(torch.tensor(megabatch_maximums)).item()
    # Switch to put the longest element in first position
    megabatches[0][0], megabatches[max_idx][0] = (
        megabatches[max_idx][0],
        megabatches[0][0],
    )

    return [item for sublist in megabatches for item in sublist]
-| *Required col (cell) attribute:* "n_counts"; total read counts in that cell. - -| *Optional col (cell) attribute:* "filter_pass"; binary indicator of whether cell should be tokenized based on user-defined filtering criteria. -| *Optional col (cell) attributes:* any other cell metadata can be passed on to the tokenized dataset as a custom attribute dictionary as shown below. - -**Usage:** - -.. code-block :: python - - >>> from geneformer import TranscriptomeTokenizer - >>> tk = TranscriptomeTokenizer({"cell_type": "cell_type", "organ_major": "organ"}, nproc=4) - >>> tk.tokenize_data("data_directory", "output_directory", "output_prefix") - -**Description:** - -| Input data is a directory with .loom or .h5ad files containing raw counts from single cell RNAseq data, including all genes detected in the transcriptome without feature selection. The input file type is specified by the argument file_format in the tokenize_data function. - -| The discussion below references the .loom file format, but the analagous labels are required for .h5ad files, just that they will be column instead of row attributes and vice versa due to the transposed format of the two file types. - -| Genes should be labeled with Ensembl IDs (loom row attribute "ensembl_id"), which provide a unique identifer for conversion to tokens. Other forms of gene annotations (e.g. gene names) can be converted to Ensembl IDs via Ensembl Biomart. Cells should be labeled with the total read count in the cell (loom column attribute "n_counts") to be used for normalization. - -| No cell metadata is required, but custom cell attributes may be passed onto the tokenized dataset by providing a dictionary of custom attributes to be added, which is formatted as loom_col_attr_name : desired_dataset_col_attr_name. 
For example, if the original .loom dataset has column attributes "cell_type" and "organ_major" and one would like to retain these attributes as labels in the tokenized dataset with the new names "cell_type" and "organ", respectively, the following custom attribute dictionary should be provided: {"cell_type": "cell_type", "organ_major": "organ"}. - -| Additionally, if the original .loom file contains a cell column attribute called "filter_pass", this column will be used as a binary indicator of whether to include these cells in the tokenized data. All cells with "1" in this attribute will be tokenized, whereas the others will be excluded. One may use this column to indicate QC filtering or other criteria for selection for inclusion in the final tokenized dataset. - -| If one's data is in other formats besides .loom or .h5ad, one can use the relevant tools (such as Anndata tools) to convert the file to a .loom or .h5ad format prior to running the transcriptome tokenizer. - -| OF NOTE: Take care that the correct token dictionary and gene median file is used for the correct model. - -| OF NOTE: For 95M model series, special_token should be True and model_input_size should be 4096. For 30M model series, special_token should be False and model_input_size should be 2048. 
- +Input data: +Required format: raw counts scRNAseq data without feature selection as .loom file +Required row (gene) attribute: "ensembl_id"; Ensembl ID for each gene +Required col (cell) attribute: "n_counts"; total read counts in that cell +Optional col (cell) attribute: "filter_pass"; binary indicator of whether cell should be tokenized based on user-defined filtering criteria +Optional col (cell) attributes: any other cell metadata can be passed on to the tokenized dataset as a custom attribute dictionary as shown below + +Usage: + from geneformer import TranscriptomeTokenizer + tk = TranscriptomeTokenizer({"cell_type": "cell_type", "organ_major": "organ_major"}, nproc=4) + tk.tokenize_data("loom_data_directory", "output_directory", "output_prefix") """ -from __future__ import annotations +import pickle +from pathlib import Path import logging -import os -import pickle + import warnings -from collections import Counter -from pathlib import Path -from typing import Literal +warnings.filterwarnings("ignore", message=".*The 'nopython' keyword.*") import loompy as lp import numpy as np -import pandas as pd -import scanpy as sc -import scipy.sparse as sp from datasets import Dataset -from tqdm import tqdm - -warnings.filterwarnings("ignore", message=".*The 'nopython' keyword.*") # noqa -import loompy as lp # noqa logger = logging.getLogger(__name__) -from . import ENSEMBL_MAPPING_FILE, GENE_MEDIAN_FILE, TOKEN_DICTIONARY_FILE - -def rank_genes(gene_vector, gene_tokens): - """ - Rank gene expression vector. 
- """ - # sort by median-scaled gene values - sorted_indices = np.argsort(-gene_vector) - return gene_tokens[sorted_indices] +GENE_MEDIAN_FILE = Path(__file__).parent / "gene_median_dictionary.pkl" +TOKEN_DICTIONARY_FILE = Path(__file__).parent / "token_dictionary.pkl" def tokenize_cell(gene_vector, gene_tokens): @@ -79,215 +39,11 @@ def tokenize_cell(gene_vector, gene_tokens): # create array of gene vector with token indices # mask undetected genes nonzero_mask = np.nonzero(gene_vector)[0] - # rank by median-scaled gene values - return rank_genes(gene_vector[nonzero_mask], gene_tokens[nonzero_mask]) - - -def sum_ensembl_ids( - data_directory, - collapse_gene_ids, - gene_mapping_dict, - gene_token_dict, - custom_attr_name_dict, - file_format="loom", - chunk_size=512, -): - if file_format == "loom": - """ - Map Ensembl IDs from gene mapping dictionary. If duplicate Ensembl IDs are found, sum counts together. - """ - with lp.connect(data_directory) as data: - assert ( - "ensembl_id" in data.ra.keys() - ), "'ensembl_id' column missing from data.ra.keys()" - - assert ( - "ensembl_id_collapsed" not in data.ra.keys() - ), "'ensembl_id_collapsed' column already exists in data.ra.keys()" - - assert ( - "n_counts" in data.ca.keys() - ), "'n_counts' column missing from data.ca.keys()" - - if custom_attr_name_dict is not None: - for label in custom_attr_name_dict: - assert label in data.ca.keys(), f"Attribute `{label}` not present in dataset features" - - # Get the ensembl ids that exist in data - ensembl_ids = data.ra.ensembl_id - # Check for duplicate Ensembl IDs if collapse_gene_ids is False. 
- # Comparing to gene_token_dict here, would not perform any mapping steps - if not collapse_gene_ids: - ensembl_id_check = [ - gene for gene in ensembl_ids if gene in gene_token_dict.keys() - ] - if len(ensembl_id_check) == len(set(ensembl_id_check)): - return data_directory - else: - raise ValueError("Error: data Ensembl IDs non-unique.") - - # Get the genes that exist in the mapping dictionary and the value of those genes - genes_in_map_dict = [gene for gene in ensembl_ids if gene in gene_mapping_dict.keys()] - vals_from_map_dict = [gene_mapping_dict.get(gene) for gene in genes_in_map_dict] - - # if the genes in the mapping dict and the value of those genes are of the same length, - # simply return the mapped values - if(len(set(genes_in_map_dict)) == len(set(vals_from_map_dict))): - mapped_vals = [gene_mapping_dict.get(gene.upper()) for gene in data.ra["ensembl_id"]] - data.ra["ensembl_id_collapsed"] = mapped_vals - return data_directory - # Genes need to be collapsed - else: - dedup_filename = data_directory.with_name( - data_directory.stem + "__dedup.loom" - ) - mapped_vals = [gene_mapping_dict.get(gene.upper()) for gene in data.ra["ensembl_id"]] - data.ra["ensembl_id_collapsed"] = mapped_vals - dup_genes = [ - idx - for idx, count in Counter(data.ra["ensembl_id_collapsed"]).items() - if count > 1 - ] - num_chunks = int(np.ceil(data.shape[1] / chunk_size)) - first_chunk = True - for _, _, view in tqdm( - data.scan(axis=1, batch_size=chunk_size), total=num_chunks - ): - - def process_chunk(view, duplic_genes): - data_count_view = pd.DataFrame( - view, index=data.ra["ensembl_id_collapsed"] - ) - unique_data_df = data_count_view.loc[ - ~data_count_view.index.isin(duplic_genes) - ] - dup_data_df = data_count_view.loc[ - data_count_view.index.isin( - [i for i in duplic_genes if "None" not in i] - ) - ] - summed_data = dup_data_df.groupby(dup_data_df.index).sum() - if not summed_data.index.is_unique: - raise ValueError( - "Error: Ensembl IDs in summed data frame 
non-unique." - ) - data_count_view = pd.concat( - [unique_data_df, summed_data], axis=0 - ) - if not data_count_view.index.is_unique: - raise ValueError( - "Error: Ensembl IDs in final data frame non-unique." - ) - return data_count_view - - processed_chunk = process_chunk(view[:, :], dup_genes) - processed_array = processed_chunk.to_numpy() - new_row_attrs = {"ensembl_id_collapsed": processed_chunk.index.to_numpy()} - - if "n_counts" not in view.ca.keys(): - total_count_view = np.sum(view[:, :], axis=0).astype(int) - view.ca["n_counts"] = total_count_view - - if first_chunk: # Create the Loom file with the first chunk - lp.create( - f"{dedup_filename}", - processed_array, - row_attrs=new_row_attrs, - col_attrs=view.ca, - ) - first_chunk = False - else: # Append subsequent chunks - with lp.connect(dedup_filename, mode="r+") as dsout: - dsout.add_columns(processed_array, col_attrs=view.ca) - return dedup_filename - - elif file_format == "h5ad": - """ - Map Ensembl IDs from gene mapping dictionary. If duplicate Ensembl IDs are found, sum counts together. - Returns adata object with deduplicated Ensembl IDs. - """ - - data = sc.read_h5ad(str(data_directory)) - - assert ( - "ensembl_id" in data.var.columns - ), "'ensembl_id' column missing from data.var" - - assert ( - "ensembl_id_collapsed" not in data.var.columns - ), "'ensembl_id_collapsed' column already exists in data.var" - assert ( - "n_counts" in data.obs.columns - ), "'n_counts' column missing from data.obs" - - if custom_attr_name_dict is not None: - for label in custom_attr_name_dict: - assert label in data.obs.columns, f"Attribute `{label}` not present in data.obs" - - - # Get the ensembl ids that exist in data - ensembl_ids = data.var.ensembl_id - # Check for duplicate Ensembl IDs if collapse_gene_ids is False. 
- # Comparing to gene_token_dict here, would not perform any mapping steps - if not collapse_gene_ids: - ensembl_id_check = [ - gene for gene in ensembl_ids if gene in gene_token_dict.keys() - ] - if len(ensembl_id_check) == len(set(ensembl_id_check)): - return data_directory - else: - raise ValueError("Error: data Ensembl IDs non-unique.") - - # Get the genes that exist in the mapping dictionary and the value of those genes - genes_in_map_dict = [gene for gene in ensembl_ids if gene in gene_mapping_dict.keys()] - vals_from_map_dict = [gene_mapping_dict.get(gene) for gene in genes_in_map_dict] - - # if the genes in the mapping dict and the value of those genes are of the same length, - # simply return the mapped values - if(len(set(genes_in_map_dict)) == len(set(vals_from_map_dict))): - data.var["ensembl_id_collapsed"] = data.var.ensembl_id.str.upper().map(gene_mapping_dict) - return data - # Genes need to be collapsed - else: - data.var["ensembl_id_collapsed"] = data.var.ensembl_id.str.upper().map(gene_mapping_dict) - data.var_names = data.var["ensembl_id_collapsed"] - data = data[:, ~data.var.index.isna()] - dup_genes = [ - idx for idx, count in Counter(data.var_names).items() if count > 1 - ] - - num_chunks = int(np.ceil(data.shape[0] / chunk_size)) - - processed_genes = [] - for i in tqdm(range(num_chunks)): - start_idx = i * chunk_size - end_idx = min((i + 1) * chunk_size, data.shape[0]) - data_chunk = data[start_idx:end_idx, :] - - processed_chunks = [] - for dup_gene in dup_genes: - data_dup_gene = data_chunk[:, data_chunk.var_names == dup_gene] - df = pd.DataFrame.sparse.from_spmatrix( - data_dup_gene.X, - index=data_dup_gene.obs_names, - columns=data_dup_gene.var_names, - ) - df_sum = pd.DataFrame(df.sum(axis=1)) - df_sum.columns = [dup_gene] - df_sum.index = data_dup_gene.obs.index - processed_chunks.append(df_sum) - - processed_chunks = pd.concat(processed_chunks, axis=1) - processed_genes.append(processed_chunks) - processed_genes = 
pd.concat(processed_genes, axis=0) - var_df = pd.DataFrame({"ensembl_id_collapsed": processed_genes.columns}) - var_df.index = processed_genes.columns - processed_genes = sc.AnnData(X=processed_genes, obs=data.obs, var=var_df) - - data_dedup = data[:, ~data.var.index.isin(dup_genes)] # Deduplicated data - data_dedup = sc.concat([data_dedup, processed_genes], axis=1) - data_dedup.obs = data.obs - return data_dedup + # sort by median-scaled gene values + sorted_indices = np.argsort(-gene_vector[nonzero_mask]) + # tokenize + sentence_tokens = gene_tokens[nonzero_mask][sorted_indices] + return sentence_tokens class TranscriptomeTokenizer: @@ -295,43 +51,25 @@ class TranscriptomeTokenizer: self, custom_attr_name_dict=None, nproc=1, - chunk_size=512, - model_input_size=4096, - special_token=True, - collapse_gene_ids=True, gene_median_file=GENE_MEDIAN_FILE, token_dictionary_file=TOKEN_DICTIONARY_FILE, - gene_mapping_file=ENSEMBL_MAPPING_FILE, ): """ Initialize tokenizer. - - **Parameters:** - + + Parameters + ---------- custom_attr_name_dict : None, dict - | Dictionary of custom attributes to be added to the dataset. - | Keys are the names of the attributes in the loom file. - | Values are the names of the attributes in the dataset. + Dictionary of custom attributes to be added to the dataset. + Keys are the names of the attributes in the loom file. + Values are the names of the attributes in the dataset. nproc : int - | Number of processes to use for dataset mapping. - chunk_size : int = 512 - | Chunk size for anndata tokenizer. - model_input_size : int = 4096 - | Max input size of model to truncate input to. - | For the 30M model series, should be 2048. For the 95M model series, should be 4096. - special_token : bool = True - | Adds CLS token before and EOS token after rank value encoding. - | For the 30M model series, should be False. For the 95M model series, should be True. 
- collapse_gene_ids : bool = True - | Whether to collapse gene IDs based on gene mapping dictionary. + Number of processes to use for dataset mapping. gene_median_file : Path - | Path to pickle file containing dictionary of non-zero median - | gene expression values across Genecorpus-30M. + Path to pickle file containing dictionary of non-zero median + gene expression values across Genecorpus-30M. token_dictionary_file : Path - | Path to pickle file containing token dictionary (Ensembl IDs:token). - gene_mapping_file : None, Path - | Path to pickle file containing dictionary for collapsing gene IDs. - + Path to pickle file containing token dictionary (Ensembl IDs:token). """ # dictionary of custom attributes {output dataset column name: input .loom column name} self.custom_attr_name_dict = custom_attr_name_dict @@ -339,15 +77,6 @@ class TranscriptomeTokenizer: # number of processes for dataset mapping self.nproc = nproc - # chunk size for anndata tokenizer - self.chunk_size = chunk_size - - # input size for tokenization - self.model_input_size = model_input_size - - # add CLS and EOS tokens - self.special_token = special_token - # load dictionary of gene normalization factors # (non-zero median value of expression across Genecorpus-30M) with open(gene_median_file, "rb") as f: @@ -357,219 +86,76 @@ class TranscriptomeTokenizer: with open(token_dictionary_file, "rb") as f: self.gene_token_dict = pickle.load(f) - # check for special token in gene_token_dict - if self.special_token: - if ("" not in self.gene_token_dict.keys()) and ( - "" not in self.gene_token_dict.keys() - ): - logger.error( - " and required in gene_token_dict when special_token = True." - ) - raise - - if not self.special_token: - if ("" in self.gene_token_dict.keys()) and ( - "" in self.gene_token_dict.keys() - ): - logger.warning( - " and are in gene_token_dict but special_token = False. Please note that for 95M model series, special_token should be True." 
- ) - - # if collapsing duplicate gene IDs - self.collapse_gene_ids = collapse_gene_ids - - # load gene mappings dictionary (Ensembl IDs:Ensembl ID) - if gene_mapping_file is not None: - with open(gene_mapping_file, "rb") as f: - self.gene_mapping_dict = pickle.load(f) - else: - self.gene_mapping_dict = {k: k for k, _ in self.gene_token_dict.items()} - # gene keys for full vocabulary - self.gene_keys = list(self.gene_token_dict.keys()) - - # Filter gene mapping dict for items that exist in gene_token_dict - gene_keys_set = set(self.gene_token_dict.keys()) - self.gene_mapping_dict = { - k: v for k, v in self.gene_mapping_dict.items() if v in gene_keys_set - } + self.gene_keys = list(self.gene_median_dict.keys()) # protein-coding and miRNA gene list dictionary for selecting .loom rows for tokenization self.genelist_dict = dict(zip(self.gene_keys, [True] * len(self.gene_keys))) - def tokenize_data( - self, - data_directory: Path | str, - output_directory: Path | str, - output_prefix: str, - file_format: Literal["loom", "h5ad"] = "loom", - use_generator: bool = False, - ): + def tokenize_data(self, loom_data_directory, output_directory, output_prefix): """ - Tokenize .loom files in data_directory and save as tokenized .dataset in output_directory. - - **Parameters:** - - data_directory : Path - | Path to directory containing loom files or anndata files + Tokenize .loom files in loom_data_directory and save as tokenized .dataset in output_directory. + + Parameters + ---------- + loom_data_directory : Path + Path to directory containing loom files output_directory : Path - | Path to directory where tokenized data will be saved as .dataset + Path to directory where tokenized data will be saved as .dataset output_prefix : str - | Prefix for output .dataset - file_format : str - | Format of input files. Can be "loom" or "h5ad". - use_generator : bool - | Whether to use generator or dict for tokenization. 
- + Prefix for output .dataset """ - tokenized_cells, cell_metadata = self.tokenize_files( - Path(data_directory), file_format - ) - tokenized_dataset = self.create_dataset( - tokenized_cells, - cell_metadata, - use_generator=use_generator, - ) + tokenized_cells, cell_metadata = self.tokenize_files(Path(loom_data_directory)) + tokenized_dataset = self.create_dataset(tokenized_cells, cell_metadata) output_path = (Path(output_directory) / output_prefix).with_suffix(".dataset") - tokenized_dataset.save_to_disk(str(output_path)) + tokenized_dataset.save_to_disk(output_path) - def tokenize_files( - self, data_directory, file_format: Literal["loom", "h5ad"] = "loom" - ): + def tokenize_files(self, loom_data_directory): tokenized_cells = [] if self.custom_attr_name_dict is not None: - cell_attr = [attr_key for attr_key in self.custom_attr_name_dict.keys()] - cell_metadata = { - attr_key: [] for attr_key in self.custom_attr_name_dict.values() - } + loom_cell_attr = [attr_key for attr_key in self.custom_attr_name_dict.keys()] + cell_metadata = {attr_key: [] for attr_key in self.custom_attr_name_dict.values()} # loops through directories to tokenize .loom files file_found = 0 - # loops through directories to tokenize .loom or .h5ad files - tokenize_file_fn = ( - self.tokenize_loom if file_format == "loom" else self.tokenize_anndata - ) - for file_path in data_directory.glob(f"*.{file_format}"): + for loom_file_path in loom_data_directory.glob("*.loom"): file_found = 1 - print(f"Tokenizing {file_path}") - file_tokenized_cells, file_cell_metadata = tokenize_file_fn(file_path) + print(f"Tokenizing {loom_file_path}") + file_tokenized_cells, file_cell_metadata = self.tokenize_file( + loom_file_path + ) tokenized_cells += file_tokenized_cells if self.custom_attr_name_dict is not None: - for k in cell_attr: - cell_metadata[self.custom_attr_name_dict[k]] += file_cell_metadata[ - k - ] + for k in loom_cell_attr: + cell_metadata[self.custom_attr_name_dict[k]] += file_cell_metadata[k] 
else: cell_metadata = None if file_found == 0: logger.error( - f"No .{file_format} files found in directory {data_directory}." - ) + f"No .loom files found in directory {loom_data_directory}.") raise return tokenized_cells, cell_metadata - def tokenize_anndata(self, adata_file_path, target_sum=10_000): - adata = sum_ensembl_ids( - adata_file_path, - self.collapse_gene_ids, - self.gene_mapping_dict, - self.gene_token_dict, - self.custom_attr_name_dict, - file_format="h5ad", - chunk_size=self.chunk_size, - ) - + def tokenize_file(self, loom_file_path): if self.custom_attr_name_dict is not None: file_cell_metadata = { attr_key: [] for attr_key in self.custom_attr_name_dict.keys() } - coding_miRNA_loc = np.where( - [self.genelist_dict.get(i, False) for i in adata.var["ensembl_id_collapsed"]] - )[0] - norm_factor_vector = np.array( - [ - self.gene_median_dict[i] - for i in adata.var["ensembl_id_collapsed"][coding_miRNA_loc] - ] - ) - coding_miRNA_ids = adata.var["ensembl_id_collapsed"][coding_miRNA_loc] - coding_miRNA_tokens = np.array( - [self.gene_token_dict[i] for i in coding_miRNA_ids] - ) - - try: - _ = adata.obs["filter_pass"] - except KeyError: - var_exists = False - else: - var_exists = True - - if var_exists: - filter_pass_loc = np.where([i == 1 for i in adata.obs["filter_pass"]])[0] - elif not var_exists: - print( - f"{adata_file_path} has no column attribute 'filter_pass'; tokenizing all cells." 
- ) - filter_pass_loc = np.array([i for i in range(adata.shape[0])]) - - tokenized_cells = [] - - for i in range(0, len(filter_pass_loc), self.chunk_size): - idx = filter_pass_loc[i : i + self.chunk_size] - - n_counts = adata[idx].obs["n_counts"].values[:, None] - X_view0 = adata[idx, :].X - X_view = X_view0[:, coding_miRNA_loc] - X_norm = X_view / n_counts * target_sum / norm_factor_vector - X_norm = sp.csr_matrix(X_norm) - - tokenized_cells += [ - rank_genes(X_norm[i].data, coding_miRNA_tokens[X_norm[i].indices]) - for i in range(X_norm.shape[0]) - ] - - # add custom attributes for subview to dict - if self.custom_attr_name_dict is not None: - for k in file_cell_metadata.keys(): - file_cell_metadata[k] += adata[idx].obs[k].tolist() - else: - file_cell_metadata = None - - return tokenized_cells, file_cell_metadata - - def tokenize_loom(self, loom_file_path, target_sum=10_000): - if self.custom_attr_name_dict is not None: - file_cell_metadata = { - attr_key: [] for attr_key in self.custom_attr_name_dict.keys() - } - loom_file_path_original = loom_file_path - - dedup_filename = loom_file_path.with_name(loom_file_path.stem + "__dedup.loom") - loom_file_path = sum_ensembl_ids( - loom_file_path, - self.collapse_gene_ids, - self.gene_mapping_dict, - self.gene_token_dict, - self.custom_attr_name_dict, - file_format="loom", - chunk_size=self.chunk_size, - ) - with lp.connect(str(loom_file_path)) as data: # define coordinates of detected protein-coding or miRNA genes and vector of their normalization factors coding_miRNA_loc = np.where( - [self.genelist_dict.get(i, False) for i in data.ra["ensembl_id_collapsed"]] + [self.genelist_dict.get(i, False) for i in data.ra["ensembl_id"]] )[0] norm_factor_vector = np.array( [ self.gene_median_dict[i] - for i in data.ra["ensembl_id_collapsed"][coding_miRNA_loc] + for i in data.ra["ensembl_id"][coding_miRNA_loc] ] ) - coding_miRNA_ids = data.ra["ensembl_id_collapsed"][coding_miRNA_loc] + coding_miRNA_ids = 
data.ra["ensembl_id"][coding_miRNA_loc] coding_miRNA_tokens = np.array( [self.gene_token_dict[i] for i in coding_miRNA_ids] ) @@ -582,9 +168,11 @@ class TranscriptomeTokenizer: else: var_exists = True - if var_exists: - filter_pass_loc = np.where([i == 1 for i in data.ca["filter_pass"]])[0] - elif not var_exists: + if var_exists is True: + filter_pass_loc = np.where( + [True if i == 1 else False for i in data.ca["filter_pass"]] + )[0] + elif var_exists is False: print( f"{loom_file_path} has no column attribute 'filter_pass'; tokenizing all cells." ) @@ -592,9 +180,7 @@ class TranscriptomeTokenizer: # scan through .loom files and tokenize cells tokenized_cells = [] - for _ix, _selection, view in data.scan( - items=filter_pass_loc, axis=1, batch_size=self.chunk_size - ): + for (_ix, _selection, view) in data.scan(items=filter_pass_loc, axis=1): # select subview with protein-coding and miRNA genes subview = view.view[coding_miRNA_loc, :] @@ -603,7 +189,7 @@ class TranscriptomeTokenizer: subview_norm_array = ( subview[:, :] / subview.ca.n_counts - * target_sum + * 10_000 / norm_factor_vector[:, None] ) # tokenize subview gene vectors @@ -619,67 +205,31 @@ class TranscriptomeTokenizer: else: file_cell_metadata = None - if str(dedup_filename) == str(loom_file_path): - os.remove(str(dedup_filename)) - - with lp.connect(str(loom_file_path_original)) as data: - if "ensembl_id_collapsed" in data.ra.keys(): - del data.ra["ensembl_id_collapsed"] - - return tokenized_cells, file_cell_metadata - def create_dataset( - self, - tokenized_cells, - cell_metadata, - use_generator=False, - keep_uncropped_input_ids=False, - ): - print("Creating dataset.") + def create_dataset(self, tokenized_cells, cell_metadata): # create dict for dataset creation dataset_dict = {"input_ids": tokenized_cells} if self.custom_attr_name_dict is not None: dataset_dict.update(cell_metadata) # create dataset - if use_generator: - - def dict_generator(): - for i in range(len(tokenized_cells)): - yield {k: 
dataset_dict[k][i] for k in dataset_dict.keys()} - - output_dataset = Dataset.from_generator(dict_generator, num_proc=self.nproc) - else: - output_dataset = Dataset.from_dict(dataset_dict) - - def format_cell_features(example): - # Store original uncropped input_ids in separate feature - if keep_uncropped_input_ids: - example["input_ids_uncropped"] = example["input_ids"] - example["length_uncropped"] = len(example["input_ids"]) - - # Truncate/Crop input_ids to input size - if self.special_token: - example["input_ids"] = example["input_ids"][ - 0 : self.model_input_size - 2 - ] # truncate to leave space for CLS and EOS token - example["input_ids"] = np.insert( - example["input_ids"], 0, self.gene_token_dict.get("") - ) - example["input_ids"] = np.insert( - example["input_ids"], - len(example["input_ids"]), - self.gene_token_dict.get(""), - ) - else: - # Truncate/Crop input_ids to input size - example["input_ids"] = example["input_ids"][0 : self.model_input_size] - example["length"] = len(example["input_ids"]) + output_dataset = Dataset.from_dict(dataset_dict) + # truncate dataset + def truncate(example): + example["input_ids"] = example["input_ids"][0:2048] return example - output_dataset_truncated = output_dataset.map( - format_cell_features, num_proc=self.nproc + output_dataset_truncated = output_dataset.map(truncate, num_proc=self.nproc) + + # measure lengths of dataset + def measure_length(example): + example["length"] = len(example["input_ids"]) + return example + + output_dataset_truncated_w_length = output_dataset_truncated.map( + measure_length, num_proc=self.nproc ) - return output_dataset_truncated + + return output_dataset_truncated_w_length diff --git a/generation_config.json b/generation_config.json deleted file mode 100644 index 6f690c1f39b5b262e6b898b8891afd9d44978f11..0000000000000000000000000000000000000000 --- a/generation_config.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "_from_model_config": true, - "pad_token_id": 0, - "transformers_version": 
"4.37.1" -} diff --git a/gf-12L-95M-i4096/config.json b/gf-12L-95M-i4096/config.json deleted file mode 100755 index 86e20c35e6f257f0daeb00ebb92a0751d12d8fff..0000000000000000000000000000000000000000 --- a/gf-12L-95M-i4096/config.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "architectures": [ - "BertForMaskedLM" - ], - "attention_probs_dropout_prob": 0.02, - "classifier_dropout": null, - "hidden_act": "relu", - "hidden_dropout_prob": 0.02, - "hidden_size": 512, - "initializer_range": 0.02, - "intermediate_size": 1024, - "layer_norm_eps": 1e-12, - "max_position_embeddings": 4096, - "model_type": "bert", - "num_attention_heads": 8, - "num_hidden_layers": 12, - "pad_token_id": 0, - "position_embedding_type": "absolute", - "torch_dtype": "float32", - "transformers_version": "4.37.1", - "type_vocab_size": 2, - "use_cache": true, - "vocab_size": 20275 -} diff --git a/gf-12L-95M-i4096/generation_config.json b/gf-12L-95M-i4096/generation_config.json deleted file mode 100755 index 6f690c1f39b5b262e6b898b8891afd9d44978f11..0000000000000000000000000000000000000000 --- a/gf-12L-95M-i4096/generation_config.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "_from_model_config": true, - "pad_token_id": 0, - "transformers_version": "4.37.1" -} diff --git a/gf-12L-95M-i4096/model.safetensors b/gf-12L-95M-i4096/model.safetensors deleted file mode 100755 index 1069352219a29bed65fa8e13feb77004128174fa..0000000000000000000000000000000000000000 --- a/gf-12L-95M-i4096/model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:4365ba23e393fcfa0e65a94ac64a0983cd788bd23a8d4914f4ab66f85cfe043c -size 152012980 diff --git a/gf-12L-95M-i4096/training_args.bin b/gf-12L-95M-i4096/training_args.bin deleted file mode 100755 index 18802f485a03e0262866d1ef7a3e4748a3b14ed3..0000000000000000000000000000000000000000 --- a/gf-12L-95M-i4096/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:21a45980734b138029422e95a5601def858821a9ec02cd473938b9f525ac108d -size 4920 diff --git a/gf-12L-95M-i4096_CLcancer/config.json b/gf-12L-95M-i4096_CLcancer/config.json deleted file mode 100755 index a7793eb2ea27b28f1f4c5b9974d30c98b4afe8a6..0000000000000000000000000000000000000000 --- a/gf-12L-95M-i4096_CLcancer/config.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "_name_or_path": "/gladstone/theodoris/lab/pretrained_models/encoder/240402_194213_geneformer_94M_L12_emb512_SL4096_E3_B4_LR0.0005_LScosine_WU5000_Oadamw_DS8/models", - "architectures": [ - "BertForMaskedLM" - ], - "attention_probs_dropout_prob": 0.02, - "classifier_dropout": null, - "hidden_act": "relu", - "hidden_dropout_prob": 0.02, - "hidden_size": 512, - "initializer_range": 0.02, - "intermediate_size": 1024, - "layer_norm_eps": 1e-12, - "max_position_embeddings": 4096, - "model_type": "bert", - "num_attention_heads": 8, - "num_hidden_layers": 12, - "pad_token_id": 0, - "position_embedding_type": "absolute", - "torch_dtype": "float32", - "transformers_version": "4.37.1", - "type_vocab_size": 2, - "use_cache": true, - "vocab_size": 20275 -} diff --git a/gf-12L-95M-i4096_CLcancer/generation_config.json b/gf-12L-95M-i4096_CLcancer/generation_config.json deleted file mode 100755 index 6f690c1f39b5b262e6b898b8891afd9d44978f11..0000000000000000000000000000000000000000 --- a/gf-12L-95M-i4096_CLcancer/generation_config.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "_from_model_config": true, - "pad_token_id": 0, - "transformers_version": "4.37.1" -} diff --git a/gf-12L-95M-i4096_CLcancer/model.safetensors b/gf-12L-95M-i4096_CLcancer/model.safetensors deleted file mode 100755 index cc620ee4b4243b7ab6d83ad518563e1425eab45b..0000000000000000000000000000000000000000 --- a/gf-12L-95M-i4096_CLcancer/model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:2451adeed240c165634fea60ccba17063da8a2843ea9fcdcc0ce185720bf0dc2 -size 152012980 diff --git 
a/gf-12L-95M-i4096_CLcancer/training_args.bin b/gf-12L-95M-i4096_CLcancer/training_args.bin deleted file mode 100755 index 1669f5848710ca4a53db6e118e50b816f85381b7..0000000000000000000000000000000000000000 --- a/gf-12L-95M-i4096_CLcancer/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:37074f3ea62a6ba0a312c38526c20c2dccbb068a2c7ee8c7c73b435dd90ab7b1 -size 5048 diff --git a/gf-20L-95M-i4096/config.json b/gf-20L-95M-i4096/config.json deleted file mode 100755 index db949ba1ae442ad3b9e52fd8b7922c6b936ef98c..0000000000000000000000000000000000000000 --- a/gf-20L-95M-i4096/config.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "architectures": [ - "BertForMaskedLM" - ], - "attention_probs_dropout_prob": 0.02, - "classifier_dropout": null, - "hidden_act": "relu", - "hidden_dropout_prob": 0.02, - "hidden_size": 896, - "initializer_range": 0.02, - "intermediate_size": 1792, - "layer_norm_eps": 1e-12, - "max_position_embeddings": 4096, - "model_type": "bert", - "num_attention_heads": 14, - "num_hidden_layers": 20, - "pad_token_id": 0, - "position_embedding_type": "absolute", - "torch_dtype": "float32", - "transformers_version": "4.37.1", - "type_vocab_size": 2, - "use_cache": true, - "vocab_size": 20275 -} diff --git a/gf-20L-95M-i4096/generation_config.json b/gf-20L-95M-i4096/generation_config.json deleted file mode 100755 index 6f690c1f39b5b262e6b898b8891afd9d44978f11..0000000000000000000000000000000000000000 --- a/gf-20L-95M-i4096/generation_config.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "_from_model_config": true, - "pad_token_id": 0, - "transformers_version": "4.37.1" -} diff --git a/gf-20L-95M-i4096/model.safetensors b/gf-20L-95M-i4096/model.safetensors deleted file mode 100755 index 37212863afb501a17425dd48766d71d534537d24..0000000000000000000000000000000000000000 --- a/gf-20L-95M-i4096/model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid 
sha256:db85c081a6d392448955c7d0185e26aba74507518df991ca8c69ee9108ce8bbf -size 605292732 diff --git a/gf-20L-95M-i4096/training_args.bin b/gf-20L-95M-i4096/training_args.bin deleted file mode 100755 index 3db61b0b99d299afb7c4a237d2b531baa253e5d3..0000000000000000000000000000000000000000 --- a/gf-20L-95M-i4096/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5afed602918d6f0c4916c1b9335bcdb619bca2c6fd6c7e0dd2a86d195264b8cc -size 5048 diff --git a/gf-6L-30M-i2048/config.json b/gf-6L-30M-i2048/config.json deleted file mode 100644 index d131b7026d684013f988cc9e3dcae2e5a284bc0e..0000000000000000000000000000000000000000 --- a/gf-6L-30M-i2048/config.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "architectures": [ - "BertForMaskedLM" - ], - "attention_probs_dropout_prob": 0.02, - "gradient_checkpointing": false, - "hidden_act": "relu", - "hidden_dropout_prob": 0.02, - "hidden_size": 256, - "initializer_range": 0.02, - "intermediate_size": 512, - "layer_norm_eps": 1e-12, - "max_position_embeddings": 2048, - "model_type": "bert", - "num_attention_heads": 4, - "num_hidden_layers": 6, - "pad_token_id": 0, - "position_embedding_type": "absolute", - "transformers_version": "4.6.0", - "type_vocab_size": 2, - "use_cache": true, - "vocab_size": 25426 -} diff --git a/gf-6L-30M-i2048/model.safetensors b/gf-6L-30M-i2048/model.safetensors deleted file mode 100644 index c06bc0c9f7517d5db759187f65d27bacc76eb631..0000000000000000000000000000000000000000 --- a/gf-6L-30M-i2048/model.safetensors +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a5e33a757431643b3697de7ef6127950cdc49e06e58d4266b3a3ab191b683f14 -size 41183536 diff --git a/gf-6L-30M-i2048/training_args.bin b/gf-6L-30M-i2048/training_args.bin deleted file mode 100644 index 3e03ccc99722f70224937e7b2e46f8faab774e23..0000000000000000000000000000000000000000 --- a/gf-6L-30M-i2048/training_args.bin +++ /dev/null @@ -1,3 +0,0 @@ -version 
https://git-lfs.github.com/spec/v1 -oid sha256:f0ec3459454205174c9d2e4d6c6930f6b0fbf3364fc03a6f4d99c4d3add2012b -size 2607 diff --git a/model.safetensors b/model.safetensors index 1069352219a29bed65fa8e13feb77004128174fa..c06bc0c9f7517d5db759187f65d27bacc76eb631 100644 --- a/model.safetensors +++ b/model.safetensors @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4365ba23e393fcfa0e65a94ac64a0983cd788bd23a8d4914f4ab66f85cfe043c -size 152012980 +oid sha256:a5e33a757431643b3697de7ef6127950cdc49e06e58d4266b3a3ab191b683f14 +size 41183536 diff --git a/gf-6L-30M-i2048/pytorch_model.bin b/pytorch_model.bin similarity index 100% rename from gf-6L-30M-i2048/pytorch_model.bin rename to pytorch_model.bin diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 0cb09a2593f3a727090f7cf9f7eacd36edd8ddbd..0000000000000000000000000000000000000000 --- a/requirements.txt +++ /dev/null @@ -1,25 +0,0 @@ -anndata>=0.9 -datasets>=2.12 -hyperopt>=0.2 -loompy>=3.0 -matplotlib>=3.7 -numpy>=1.23 -optuna>=3.6 -optuna-integration>=3.6 -packaging>=23.0 -pandas>=2.0 -peft>=0.11.1 -pyarrow>=12.0 -pytz>=2023.0 -ray>=2.6 -scanpy>=1.9 -scikit_learn>=1.2 -scipy>=1.10 -seaborn>=0.12 -setuptools>=65.6 -statsmodels>=0.14 -tdigest>=0.5.2 -tensorboard>=2.15 -torch>=2.0.1 -tqdm>=4.65 -transformers>=4.40 diff --git a/setup.py b/setup.py index 6dde9eefad8c76e3d1e41ae187f2215bdbc93db5..df203bdaac9124ebfbaf9bd1e4ecd5abdc24ebc2 100644 --- a/setup.py +++ b/setup.py @@ -1,42 +1,21 @@ -from setuptools import setup, find_packages +from setuptools import setup setup( name="geneformer", - version="0.1.0", + version="0.0.1", author="Christina Theodoris", author_email="christina.theodoris@gladstone.ucsf.edu", description="Geneformer is a transformer model pretrained \ - on a large-scale corpus of single \ + on a large-scale corpus of ~30 million single \ cell transcriptomes to enable context-aware \ predictions in settings with limited data in \ network biology.", - 
packages=find_packages(), - python_requires=">=3.10", + packages=["geneformer"], include_package_data=True, install_requires=[ - "anndata", "datasets", "loompy", - "matplotlib", "numpy", - "optuna", - "optuna-integration", - "packaging", - "pandas", - "peft", - "pyarrow", - "pytz", - "ray", - "scanpy", - "scikit-learn", - "scipy", - "seaborn", - "setuptools", - "statsmodels", - "tdigest", - "tensorboard", - "torch", - "tqdm", "transformers", ], ) diff --git a/training_args.bin b/training_args.bin index 18802f485a03e0262866d1ef7a3e4748a3b14ed3..3e03ccc99722f70224937e7b2e46f8faab774e23 100644 --- a/training_args.bin +++ b/training_args.bin @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:21a45980734b138029422e95a5601def858821a9ec02cd473938b9f525ac108d -size 4920 +oid sha256:f0ec3459454205174c9d2e4d6c6930f6b0fbf3364fc03a6f4d99c4d3add2012b +size 2607